#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
// @generated by torchgen/gen.py from RegisterFunctionalization.cpp
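// This file registers kernels for the Functionalize dispatch key. Broadly,
// functionalization replaces mutation and aliasing with purely functional
// ops: each kernel unwraps FunctionalTensorWrapper inputs (see
// FunctionalTensorWrapper.h), redispatches to the out-of-place variant of
// the op, and writes the result back into the mutated wrappers.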

#include <ATen/core/LegacyTypeDispatch.h>
#include <ATen/EmptyTensor.h>
#include <ATen/FunctionalTensorWrapper.h>
#include <ATen/FunctionalInverses.h>
#include <torch/library.h>

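// When AT_PER_OPERATOR_HEADERS is defined, each operator's declarations are
// pulled in individually below instead of via the monolithic Operators.h /
// NativeFunctions.h headers, so a change to a single entry in
// native_functions.yaml does not force a rebuild of every translation unit.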
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Operators.h>
#include <ATen/NativeFunctions.h>
#else
// needed for the meta tensor calls to get stride info in functionalization
#include <ATen/ops/empty_strided_native.h>
// needed for special handling of copy_().
// See Note [functionalizating copy_() and not preserving strides]
#include <ATen/ops/to_ops.h>
#include <ATen/ops/expand_copy_ops.h>

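// Each operator below contributes a pair of headers: <op>_ops.h declares the
// at::_ops::<op> entry points used to redispatch to the out-of-place variant,
// and <op>_native.h declares the backend kernel signatures. The kernels
// defined later in this file all follow the same shape; an illustrative
// sketch (hand-written here, not the exact generated code) of a
// functionalization kernel for an in-place op looks roughly like:
//
//   at::Tensor & sin_(c10::DispatchKeySet ks, at::Tensor & self) {
//     if (!at::functionalization::impl::isFunctionalTensor(self)) {
//       // No functional tensors involved: skip past the Functionalize key.
//       at::AutoDispatchSkipFunctionalize guard;
//       return self.sin_();
//     }
//     at::functionalization::impl::sync(self);
//     auto self_ = at::functionalization::impl::from_functional_tensor(self);
//     at::Tensor tmp_output;
//     {
//       at::AutoDispatchSkipFunctionalize guard;
//       tmp_output = at::_ops::sin::call(self_);  // out-of-place variant
//     }
//     // Propagate the functional result back into the mutated wrapper.
//     at::functionalization::impl::replace_(self, tmp_output);
//     at::functionalization::impl::commit_update(self);
//     at::functionalization::impl::sync(self);
//     return self;
//   }
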
#include <ATen/ops/_masked_scale_native.h>
#include <ATen/ops/_masked_scale_ops.h>
#include <ATen/ops/native_dropout_native.h>
#include <ATen/ops/native_dropout_ops.h>
#include <ATen/ops/native_dropout_backward_native.h>
#include <ATen/ops/native_dropout_backward_ops.h>
#include <ATen/ops/asinh_native.h>
#include <ATen/ops/asinh_ops.h>
#include <ATen/ops/arctanh_native.h>
#include <ATen/ops/arctanh_ops.h>
#include <ATen/ops/baddbmm_native.h>
#include <ATen/ops/baddbmm_ops.h>
#include <ATen/ops/quantized_batch_norm_native.h>
#include <ATen/ops/quantized_batch_norm_ops.h>
#include <ATen/ops/bernoulli_native.h>
#include <ATen/ops/bernoulli_ops.h>
#include <ATen/ops/bmm_native.h>
#include <ATen/ops/bmm_ops.h>
#include <ATen/ops/clamp_max_native.h>
#include <ATen/ops/clamp_max_ops.h>
#include <ATen/ops/clamp_min_native.h>
#include <ATen/ops/clamp_min_ops.h>
#include <ATen/ops/clip_native.h>
#include <ATen/ops/clip_ops.h>
#include <ATen/ops/complex_native.h>
#include <ATen/ops/complex_ops.h>
#include <ATen/ops/constant_pad_nd_native.h>
#include <ATen/ops/constant_pad_nd_ops.h>
#include <ATen/ops/conv_tbc_native.h>
#include <ATen/ops/conv_tbc_ops.h>
#include <ATen/ops/_copy_from_and_resize_native.h>
#include <ATen/ops/_copy_from_and_resize_ops.h>
#include <ATen/ops/cos_native.h>
#include <ATen/ops/cos_ops.h>
#include <ATen/ops/count_nonzero_native.h>
#include <ATen/ops/count_nonzero_ops.h>
#include <ATen/ops/cudnn_affine_grid_generator_backward_native.h>
#include <ATen/ops/cudnn_affine_grid_generator_backward_ops.h>
#include <ATen/ops/cudnn_batch_norm_native.h>
#include <ATen/ops/cudnn_batch_norm_ops.h>
#include <ATen/ops/mps_convolution_transpose_backward_native.h>
#include <ATen/ops/mps_convolution_transpose_backward_ops.h>
#include <ATen/ops/cudnn_grid_sampler_backward_native.h>
#include <ATen/ops/cudnn_grid_sampler_backward_ops.h>
#include <ATen/ops/cummin_native.h>
#include <ATen/ops/cummin_ops.h>
#include <ATen/ops/cumsum_native.h>
#include <ATen/ops/cumsum_ops.h>
#include <ATen/ops/_ctc_loss_native.h>
#include <ATen/ops/_ctc_loss_ops.h>
#include <ATen/ops/_ctc_loss_backward_native.h>
#include <ATen/ops/_ctc_loss_backward_ops.h>
#include <ATen/ops/embedding_native.h>
#include <ATen/ops/embedding_ops.h>
#include <ATen/ops/embedding_dense_backward_native.h>
#include <ATen/ops/embedding_dense_backward_ops.h>
#include <ATen/ops/new_zeros_native.h>
#include <ATen/ops/new_zeros_ops.h>
#include <ATen/ops/new_ones_native.h>
#include <ATen/ops/new_ones_ops.h>
#include <ATen/ops/_empty_per_channel_affine_quantized_native.h>
#include <ATen/ops/_empty_per_channel_affine_quantized_ops.h>
#include <ATen/ops/resize_native.h>
#include <ATen/ops/resize_ops.h>
#include <ATen/ops/exp2_native.h>
#include <ATen/ops/exp2_ops.h>
#include <ATen/ops/_grid_sampler_2d_cpu_fallback_native.h>
#include <ATen/ops/_grid_sampler_2d_cpu_fallback_ops.h>
#include <ATen/ops/grid_sampler_3d_backward_native.h>
#include <ATen/ops/grid_sampler_3d_backward_ops.h>
#include <ATen/ops/_fft_c2c_native.h>
#include <ATen/ops/_fft_c2c_ops.h>
#include <ATen/ops/index_copy_native.h>
#include <ATen/ops/index_copy_ops.h>
#include <ATen/ops/isin_native.h>
#include <ATen/ops/isin_ops.h>
#include <ATen/ops/kron_native.h>
#include <ATen/ops/kron_ops.h>
#include <ATen/ops/nan_to_num_native.h>
#include <ATen/ops/nan_to_num_ops.h>
#include <ATen/ops/linear_native.h>
#include <ATen/ops/linear_ops.h>
#include <ATen/ops/mkldnn_linear_native.h>
#include <ATen/ops/mkldnn_linear_ops.h>
#include <ATen/ops/linspace_native.h>
#include <ATen/ops/linspace_ops.h>
#include <ATen/ops/log_native.h>
#include <ATen/ops/log_ops.h>
#include <ATen/ops/log_softmax_native.h>
#include <ATen/ops/log_softmax_ops.h>
#include <ATen/ops/_log_softmax_native.h>
#include <ATen/ops/_log_softmax_ops.h>
#include <ATen/ops/_log_softmax_backward_data_native.h>
#include <ATen/ops/_log_softmax_backward_data_ops.h>
#include <ATen/ops/max_native.h>
#include <ATen/ops/max_ops.h>
#include <ATen/ops/amax_native.h>
#include <ATen/ops/amax_ops.h>
#include <ATen/ops/mkldnn_max_pool2d_backward_native.h>
#include <ATen/ops/mkldnn_max_pool2d_backward_ops.h>
#include <ATen/ops/mkldnn_max_pool3d_native.h>
#include <ATen/ops/mkldnn_max_pool3d_ops.h>
#include <ATen/ops/quantized_max_pool2d_native.h>
#include <ATen/ops/quantized_max_pool2d_ops.h>
#include <ATen/ops/mean_native.h>
#include <ATen/ops/mean_ops.h>
#include <ATen/ops/nanmedian_native.h>
#include <ATen/ops/nanmedian_ops.h>
#include <ATen/ops/mode_native.h>
#include <ATen/ops/mode_ops.h>
#include <ATen/ops/multiply_native.h>
#include <ATen/ops/multiply_ops.h>
#include <ATen/ops/narrow_copy_native.h>
#include <ATen/ops/narrow_copy_ops.h>
#include <ATen/ops/batch_norm_gather_stats_native.h>
#include <ATen/ops/batch_norm_gather_stats_ops.h>
#include <ATen/ops/batch_norm_gather_stats_with_counts_native.h>
#include <ATen/ops/batch_norm_gather_stats_with_counts_ops.h>
#include <ATen/ops/native_batch_norm_backward_native.h>
#include <ATen/ops/native_batch_norm_backward_ops.h>
#include <ATen/ops/ones_native.h>
#include <ATen/ops/ones_ops.h>
#include <ATen/ops/_pdist_forward_native.h>
#include <ATen/ops/_pdist_forward_ops.h>
#include <ATen/ops/_pdist_backward_native.h>
#include <ATen/ops/_pdist_backward_ops.h>
#include <ATen/ops/pixel_shuffle_native.h>
#include <ATen/ops/pixel_shuffle_ops.h>
#include <ATen/ops/_pin_memory_native.h>
#include <ATen/ops/_pin_memory_ops.h>
#include <ATen/ops/randn_native.h>
#include <ATen/ops/randn_ops.h>
#include <ATen/ops/randn_like_native.h>
#include <ATen/ops/randn_like_ops.h>
#include <ATen/ops/neg_native.h>
#include <ATen/ops/neg_ops.h>
#include <ATen/ops/negative_native.h>
#include <ATen/ops/negative_ops.h>
#include <ATen/ops/repeat_interleave_native.h>
#include <ATen/ops/repeat_interleave_ops.h>
#include <ATen/ops/gelu_native.h>
#include <ATen/ops/gelu_ops.h>
#include <ATen/ops/select_backward_native.h>
#include <ATen/ops/select_backward_ops.h>
#include <ATen/ops/mish_native.h>
#include <ATen/ops/mish_ops.h>
#include <ATen/ops/slice_scatter_native.h>
#include <ATen/ops/slice_scatter_ops.h>
#include <ATen/ops/diagonal_scatter_native.h>
#include <ATen/ops/diagonal_scatter_ops.h>
#include <ATen/ops/_softmax_backward_data_native.h>
#include <ATen/ops/_softmax_backward_data_ops.h>
#include <ATen/ops/unsafe_split_native.h>
#include <ATen/ops/unsafe_split_ops.h>
#include <ATen/ops/unsafe_split_with_sizes_native.h>
#include <ATen/ops/unsafe_split_with_sizes_ops.h>
#include <ATen/ops/square_native.h>
#include <ATen/ops/square_ops.h>
#include <ATen/ops/tanh_native.h>
#include <ATen/ops/tanh_ops.h>
#include <ATen/ops/roll_native.h>
#include <ATen/ops/roll_ops.h>
#include <ATen/ops/rot90_native.h>
#include <ATen/ops/rot90_ops.h>
#include <ATen/ops/_trilinear_native.h>
#include <ATen/ops/_trilinear_ops.h>
#include <ATen/ops/_unique_native.h>
#include <ATen/ops/_unique_ops.h>
#include <ATen/ops/_unique2_native.h>
#include <ATen/ops/_unique2_ops.h>
#include <ATen/ops/_weight_norm_interface_native.h>
#include <ATen/ops/_weight_norm_interface_ops.h>
#include <ATen/ops/_efficientzerotensor_native.h>
#include <ATen/ops/_efficientzerotensor_ops.h>
#include <ATen/ops/_standard_gamma_native.h>
#include <ATen/ops/_standard_gamma_ops.h>
#include <ATen/ops/_dirichlet_grad_native.h>
#include <ATen/ops/_dirichlet_grad_ops.h>
#include <ATen/ops/norm_native.h>
#include <ATen/ops/norm_ops.h>
#include <ATen/ops/frexp_native.h>
#include <ATen/ops/frexp_ops.h>
#include <ATen/ops/frobenius_norm_native.h>
#include <ATen/ops/frobenius_norm_ops.h>
#include <ATen/ops/nuclear_norm_native.h>
#include <ATen/ops/nuclear_norm_ops.h>
#include <ATen/ops/subtract_native.h>
#include <ATen/ops/subtract_ops.h>
#include <ATen/ops/sparse_sampled_addmm_native.h>
#include <ATen/ops/sparse_sampled_addmm_ops.h>
#include <ATen/ops/_addmm_activation_native.h>
#include <ATen/ops/_addmm_activation_ops.h>
#include <ATen/ops/_to_dense_native.h>
#include <ATen/ops/_to_dense_ops.h>
#include <ATen/ops/_coalesced_native.h>
#include <ATen/ops/_coalesced_ops.h>
#include <ATen/ops/to_sparse_csr_native.h>
#include <ATen/ops/to_sparse_csr_ops.h>
#include <ATen/ops/to_sparse_csc_native.h>
#include <ATen/ops/to_sparse_csc_ops.h>
#include <ATen/ops/to_sparse_bsc_native.h>
#include <ATen/ops/to_sparse_bsc_ops.h>
#include <ATen/ops/quantize_per_tensor_dynamic_native.h>
#include <ATen/ops/quantize_per_tensor_dynamic_ops.h>
#include <ATen/ops/quantize_per_tensor_native.h>
#include <ATen/ops/quantize_per_tensor_ops.h>
#include <ATen/ops/quantize_per_channel_native.h>
#include <ATen/ops/quantize_per_channel_ops.h>
#include <ATen/ops/_make_per_channel_quantized_tensor_native.h>
#include <ATen/ops/_make_per_channel_quantized_tensor_ops.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_native.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_ops.h>
#include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_native.h>
#include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_ops.h>
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_native.h>
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_ops.h>
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_native.h>
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_ops.h>
#include <ATen/ops/lstm_mps_backward_native.h>
#include <ATen/ops/lstm_mps_backward_ops.h>
#include <ATen/ops/lift_fresh_copy_native.h>
#include <ATen/ops/lift_fresh_copy_ops.h>
#include <ATen/ops/_masked_softmax_backward_native.h>
#include <ATen/ops/_masked_softmax_backward_ops.h>
#include <ATen/ops/put_native.h>
#include <ATen/ops/put_ops.h>
#include <ATen/ops/index_add_native.h>
#include <ATen/ops/index_add_ops.h>
#include <ATen/ops/index_fill_native.h>
#include <ATen/ops/index_fill_ops.h>
#include <ATen/ops/scatter_native.h>
#include <ATen/ops/scatter_ops.h>
#include <ATen/ops/scatter_add_native.h>
#include <ATen/ops/scatter_add_ops.h>
#include <ATen/ops/lshift_native.h>
#include <ATen/ops/lshift_ops.h>
#include <ATen/ops/greater_equal_native.h>
#include <ATen/ops/greater_equal_ops.h>
#include <ATen/ops/less_equal_native.h>
#include <ATen/ops/less_equal_ops.h>
#include <ATen/ops/greater_native.h>
#include <ATen/ops/greater_ops.h>
#include <ATen/ops/lt_native.h>
#include <ATen/ops/lt_ops.h>
#include <ATen/ops/less_native.h>
#include <ATen/ops/less_ops.h>
#include <ATen/ops/take_native.h>
#include <ATen/ops/take_ops.h>
#include <ATen/ops/gather_native.h>
#include <ATen/ops/gather_ops.h>
#include <ATen/ops/cholesky_native.h>
#include <ATen/ops/cholesky_ops.h>
#include <ATen/ops/_cholesky_solve_helper_native.h>
#include <ATen/ops/_cholesky_solve_helper_ops.h>
#include <ATen/ops/polygamma_native.h>
#include <ATen/ops/polygamma_ops.h>
#include <ATen/ops/igamma_native.h>
#include <ATen/ops/igamma_ops.h>
#include <ATen/ops/fmin_native.h>
#include <ATen/ops/fmin_ops.h>
#include <ATen/ops/fmax_native.h>
#include <ATen/ops/fmax_ops.h>
#include <ATen/ops/maximum_native.h>
#include <ATen/ops/maximum_ops.h>
#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_native.h>
#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_ops.h>
#include <ATen/ops/_foreach_sub_native.h>
#include <ATen/ops/_foreach_sub_ops.h>
#include <ATen/ops/_foreach_maximum_native.h>
#include <ATen/ops/_foreach_maximum_ops.h>
#include <ATen/ops/_foreach_exp_native.h>
#include <ATen/ops/_foreach_exp_ops.h>
#include <ATen/ops/_foreach_sqrt_native.h>
#include <ATen/ops/_foreach_sqrt_ops.h>
#include <ATen/ops/_foreach_abs_native.h>
#include <ATen/ops/_foreach_abs_ops.h>
#include <ATen/ops/_foreach_acos_native.h>
#include <ATen/ops/_foreach_acos_ops.h>
#include <ATen/ops/_foreach_cos_native.h>
#include <ATen/ops/_foreach_cos_ops.h>
#include <ATen/ops/_foreach_floor_native.h>
#include <ATen/ops/_foreach_floor_ops.h>
#include <ATen/ops/_foreach_log10_native.h>
#include <ATen/ops/_foreach_log10_ops.h>
#include <ATen/ops/_foreach_neg_native.h>
#include <ATen/ops/_foreach_neg_ops.h>
#include <ATen/ops/_foreach_tan_native.h>
#include <ATen/ops/_foreach_tan_ops.h>
#include <ATen/ops/_foreach_sigmoid_native.h>
#include <ATen/ops/_foreach_sigmoid_ops.h>
#include <ATen/ops/_foreach_norm_native.h>
#include <ATen/ops/_foreach_norm_ops.h>
#include <ATen/ops/searchsorted_native.h>
#include <ATen/ops/searchsorted_ops.h>
#include <ATen/ops/mse_loss_backward_native.h>
#include <ATen/ops/mse_loss_backward_ops.h>
#include <ATen/ops/smooth_l1_loss_backward_native.h>
#include <ATen/ops/smooth_l1_loss_backward_ops.h>
#include <ATen/ops/huber_loss_backward_native.h>
#include <ATen/ops/huber_loss_backward_ops.h>
#include <ATen/ops/elu_backward_native.h>
#include <ATen/ops/elu_backward_ops.h>
#include <ATen/ops/glu_jvp_native.h>
#include <ATen/ops/glu_jvp_ops.h>
#include <ATen/ops/hardsigmoid_backward_native.h>
#include <ATen/ops/hardsigmoid_backward_ops.h>
#include <ATen/ops/log_sigmoid_native.h>
#include <ATen/ops/log_sigmoid_ops.h>
#include <ATen/ops/log_sigmoid_forward_native.h>
#include <ATen/ops/log_sigmoid_forward_ops.h>
#include <ATen/ops/rrelu_with_noise_native.h>
#include <ATen/ops/rrelu_with_noise_ops.h>
#include <ATen/ops/rrelu_with_noise_backward_native.h>
#include <ATen/ops/rrelu_with_noise_backward_ops.h>
#include <ATen/ops/softplus_backward_native.h>
#include <ATen/ops/softplus_backward_ops.h>
#include <ATen/ops/mkldnn_adaptive_avg_pool2d_native.h>
#include <ATen/ops/mkldnn_adaptive_avg_pool2d_ops.h>
#include <ATen/ops/adaptive_max_pool2d_native.h>
#include <ATen/ops/adaptive_max_pool2d_ops.h>
#include <ATen/ops/adaptive_max_pool3d_native.h>
#include <ATen/ops/adaptive_max_pool3d_ops.h>
#include <ATen/ops/avg_pool2d_backward_native.h>
#include <ATen/ops/avg_pool2d_backward_ops.h>
#include <ATen/ops/max_pool2d_with_indices_native.h>
#include <ATen/ops/max_pool2d_with_indices_ops.h>
#include <ATen/ops/upsample_linear1d_native.h>
#include <ATen/ops/upsample_linear1d_ops.h>
#include <ATen/ops/upsample_linear1d_backward_native.h>
#include <ATen/ops/upsample_linear1d_backward_ops.h>
#include <ATen/ops/upsample_bicubic2d_native.h>
#include <ATen/ops/upsample_bicubic2d_ops.h>
#include <ATen/ops/upsample_bicubic2d_backward_native.h>
#include <ATen/ops/upsample_bicubic2d_backward_ops.h>
#include <ATen/ops/_upsample_bicubic2d_aa_backward_native.h>
#include <ATen/ops/_upsample_bicubic2d_aa_backward_ops.h>
#include <ATen/ops/upsample_nearest1d_native.h>
#include <ATen/ops/upsample_nearest1d_ops.h>
#include <ATen/ops/_upsample_nearest_exact1d_native.h>
#include <ATen/ops/_upsample_nearest_exact1d_ops.h>
#include <ATen/ops/upsample_nearest1d_backward_native.h>
#include <ATen/ops/upsample_nearest1d_backward_ops.h>
#include <ATen/ops/upsample_nearest3d_native.h>
#include <ATen/ops/upsample_nearest3d_ops.h>
#include <ATen/ops/slow_conv_transpose3d_native.h>
#include <ATen/ops/slow_conv_transpose3d_ops.h>
#include <ATen/ops/isposinf_native.h>
#include <ATen/ops/isposinf_ops.h>
#include <ATen/ops/special_entr_native.h>
#include <ATen/ops/special_entr_ops.h>
#include <ATen/ops/special_psi_native.h>
#include <ATen/ops/special_psi_ops.h>
#include <ATen/ops/special_erfinv_native.h>
#include <ATen/ops/special_erfinv_ops.h>
#include <ATen/ops/special_ndtr_native.h>
#include <ATen/ops/special_ndtr_ops.h>
#include <ATen/ops/special_xlogy_native.h>
#include <ATen/ops/special_xlogy_ops.h>
#include <ATen/ops/special_zeta_native.h>
#include <ATen/ops/special_zeta_ops.h>
#include <ATen/ops/special_i0_native.h>
#include <ATen/ops/special_i0_ops.h>
#include <ATen/ops/special_i0e_native.h>
#include <ATen/ops/special_i0e_ops.h>
#include <ATen/ops/special_expit_native.h>
#include <ATen/ops/special_expit_ops.h>
#include <ATen/ops/special_round_native.h>
#include <ATen/ops/special_round_ops.h>
#include <ATen/ops/special_gammainc_native.h>
#include <ATen/ops/special_gammainc_ops.h>
#include <ATen/ops/fft_irfft_native.h>
#include <ATen/ops/fft_irfft_ops.h>
#include <ATen/ops/fft_fft2_native.h>
#include <ATen/ops/fft_fft2_ops.h>
#include <ATen/ops/fft_rfftfreq_native.h>
#include <ATen/ops/fft_rfftfreq_ops.h>
#include <ATen/ops/linalg_cholesky_native.h>
#include <ATen/ops/linalg_cholesky_ops.h>
#include <ATen/ops/linalg_lu_native.h>
#include <ATen/ops/linalg_lu_ops.h>
#include <ATen/ops/_linalg_det_native.h>
#include <ATen/ops/_linalg_det_ops.h>
#include <ATen/ops/slogdet_native.h>
#include <ATen/ops/slogdet_ops.h>
#include <ATen/ops/linalg_eig_native.h>
#include <ATen/ops/linalg_eig_ops.h>
#include <ATen/ops/linalg_inv_native.h>
#include <ATen/ops/linalg_inv_ops.h>
#include <ATen/ops/_linalg_svd_native.h>
#include <ATen/ops/_linalg_svd_ops.h>
#include <ATen/ops/linalg_svdvals_native.h>
#include <ATen/ops/linalg_svdvals_ops.h>
#include <ATen/ops/linalg_pinv_native.h>
#include <ATen/ops/linalg_pinv_ops.h>
#include <ATen/ops/linalg_qr_native.h>
#include <ATen/ops/linalg_qr_ops.h>
#include <ATen/ops/_test_optional_intlist_native.h>
#include <ATen/ops/_test_optional_intlist_ops.h>
#include <ATen/ops/_fw_primal_copy_native.h>
#include <ATen/ops/_fw_primal_copy_ops.h>
#include <ATen/ops/as_strided_copy_native.h>
#include <ATen/ops/as_strided_copy_ops.h>
#include <ATen/ops/expand_copy_native.h>
#include <ATen/ops/expand_copy_ops.h>
#include <ATen/ops/_reshape_alias_copy_native.h>
#include <ATen/ops/_reshape_alias_copy_ops.h>
#include <ATen/ops/select_copy_native.h>
#include <ATen/ops/select_copy_ops.h>
#include <ATen/ops/split_with_sizes_copy_native.h>
#include <ATen/ops/split_with_sizes_copy_ops.h>
#include <ATen/ops/squeeze_copy_native.h>
#include <ATen/ops/squeeze_copy_ops.h>
#include <ATen/ops/_indices_copy_native.h>
#include <ATen/ops/_indices_copy_ops.h>
#include <ATen/ops/_values_copy_native.h>
#include <ATen/ops/_values_copy_ops.h>
#include <ATen/ops/crow_indices_copy_native.h>
#include <ATen/ops/crow_indices_copy_ops.h>
#include <ATen/ops/col_indices_copy_native.h>
#include <ATen/ops/col_indices_copy_ops.h>
#include <ATen/ops/unbind_copy_native.h>
#include <ATen/ops/unbind_copy_ops.h>
#include <ATen/ops/view_copy_native.h>
#include <ATen/ops/view_copy_ops.h>
#include <ATen/ops/alias_copy_native.h>
#include <ATen/ops/alias_copy_ops.h>
#include <ATen/ops/special_airy_ai_native.h>
#include <ATen/ops/special_airy_ai_ops.h>
1237 | #include <ATen/ops/special_bessel_j0_native.h> |
1238 | #include <ATen/ops/special_bessel_j0_ops.h> |
1239 | #include <ATen/ops/special_bessel_j0_native.h> |
1240 | #include <ATen/ops/special_bessel_j0_ops.h> |
1241 | #include <ATen/ops/special_chebyshev_polynomial_v_native.h> |
1242 | #include <ATen/ops/special_chebyshev_polynomial_v_ops.h> |
1243 | #include <ATen/ops/special_chebyshev_polynomial_v_native.h> |
1244 | #include <ATen/ops/special_chebyshev_polynomial_v_ops.h> |
1245 | #include <ATen/ops/special_chebyshev_polynomial_v_native.h> |
1246 | #include <ATen/ops/special_chebyshev_polynomial_v_ops.h> |
1247 | #include <ATen/ops/special_chebyshev_polynomial_v_native.h> |
1248 | #include <ATen/ops/special_chebyshev_polynomial_v_ops.h> |
1249 | #include <ATen/ops/special_chebyshev_polynomial_v_native.h> |
1250 | #include <ATen/ops/special_chebyshev_polynomial_v_ops.h> |
1251 | #include <ATen/ops/special_chebyshev_polynomial_v_native.h> |
1252 | #include <ATen/ops/special_chebyshev_polynomial_v_ops.h> |
1253 | #include <ATen/ops/special_chebyshev_polynomial_w_native.h> |
1254 | #include <ATen/ops/special_chebyshev_polynomial_w_ops.h> |
1255 | #include <ATen/ops/special_chebyshev_polynomial_w_native.h> |
1256 | #include <ATen/ops/special_chebyshev_polynomial_w_ops.h> |
1257 | #include <ATen/ops/special_chebyshev_polynomial_w_native.h> |
1258 | #include <ATen/ops/special_chebyshev_polynomial_w_ops.h> |
1259 | #include <ATen/ops/special_chebyshev_polynomial_w_native.h> |
1260 | #include <ATen/ops/special_chebyshev_polynomial_w_ops.h> |
1261 | #include <ATen/ops/special_chebyshev_polynomial_w_native.h> |
1262 | #include <ATen/ops/special_chebyshev_polynomial_w_ops.h> |
1263 | #include <ATen/ops/special_chebyshev_polynomial_w_native.h> |
1264 | #include <ATen/ops/special_chebyshev_polynomial_w_ops.h> |
1265 | #include <ATen/ops/special_hermite_polynomial_he_native.h> |
1266 | #include <ATen/ops/special_hermite_polynomial_he_ops.h> |
1267 | #include <ATen/ops/special_hermite_polynomial_he_native.h> |
1268 | #include <ATen/ops/special_hermite_polynomial_he_ops.h> |
1269 | #include <ATen/ops/special_hermite_polynomial_he_native.h> |
1270 | #include <ATen/ops/special_hermite_polynomial_he_ops.h> |
1271 | #include <ATen/ops/special_hermite_polynomial_he_native.h> |
1272 | #include <ATen/ops/special_hermite_polynomial_he_ops.h> |
1273 | #include <ATen/ops/special_hermite_polynomial_he_native.h> |
1274 | #include <ATen/ops/special_hermite_polynomial_he_ops.h> |
1275 | #include <ATen/ops/special_hermite_polynomial_he_native.h> |
1276 | #include <ATen/ops/special_hermite_polynomial_he_ops.h> |
1277 | #include <ATen/ops/special_laguerre_polynomial_l_native.h> |
1278 | #include <ATen/ops/special_laguerre_polynomial_l_ops.h> |
1279 | #include <ATen/ops/special_laguerre_polynomial_l_native.h> |
1280 | #include <ATen/ops/special_laguerre_polynomial_l_ops.h> |
1281 | #include <ATen/ops/special_laguerre_polynomial_l_native.h> |
1282 | #include <ATen/ops/special_laguerre_polynomial_l_ops.h> |
1283 | #include <ATen/ops/special_laguerre_polynomial_l_native.h> |
1284 | #include <ATen/ops/special_laguerre_polynomial_l_ops.h> |
1285 | #include <ATen/ops/special_laguerre_polynomial_l_native.h> |
1286 | #include <ATen/ops/special_laguerre_polynomial_l_ops.h> |
1287 | #include <ATen/ops/special_laguerre_polynomial_l_native.h> |
1288 | #include <ATen/ops/special_laguerre_polynomial_l_ops.h> |
1289 | #include <ATen/ops/special_shifted_chebyshev_polynomial_v_native.h> |
1290 | #include <ATen/ops/special_shifted_chebyshev_polynomial_v_ops.h> |
1291 | #include <ATen/ops/special_shifted_chebyshev_polynomial_v_native.h> |
1292 | #include <ATen/ops/special_shifted_chebyshev_polynomial_v_ops.h> |
1293 | #include <ATen/ops/special_shifted_chebyshev_polynomial_v_native.h> |
1294 | #include <ATen/ops/special_shifted_chebyshev_polynomial_v_ops.h> |
1295 | #include <ATen/ops/special_shifted_chebyshev_polynomial_v_native.h> |
1296 | #include <ATen/ops/special_shifted_chebyshev_polynomial_v_ops.h> |
1297 | #include <ATen/ops/special_shifted_chebyshev_polynomial_v_native.h> |
1298 | #include <ATen/ops/special_shifted_chebyshev_polynomial_v_ops.h> |
1299 | #include <ATen/ops/special_shifted_chebyshev_polynomial_v_native.h> |
1300 | #include <ATen/ops/special_shifted_chebyshev_polynomial_v_ops.h> |
1301 | #include <ATen/ops/_fused_adamw_native.h> |
1302 | #include <ATen/ops/_fused_adamw_ops.h> |
1303 | #include <ATen/ops/_fused_adamw_native.h> |
1304 | #include <ATen/ops/_fused_adamw_ops.h> |
1305 | #include <ATen/ops/_fused_adamw_native.h> |
1306 | #include <ATen/ops/_fused_adamw_ops.h> |
1307 | #include <ATen/ops/_fw_primal_native.h> |
1308 | #include <ATen/ops/_fw_primal_ops.h> |
1309 | #include <ATen/ops/_fw_primal_copy_native.h> |
1310 | #include <ATen/ops/_fw_primal_copy_ops.h> |
1311 | #include <ATen/ops/_make_dual_native.h> |
1312 | #include <ATen/ops/_make_dual_ops.h> |
1313 | #include <ATen/ops/_make_dual_copy_native.h> |
1314 | #include <ATen/ops/_make_dual_copy_ops.h> |
1315 | #include <ATen/ops/_unpack_dual_native.h> |
1316 | #include <ATen/ops/_unpack_dual_ops.h> |
1317 | #include <ATen/ops/align_to_native.h> |
1318 | #include <ATen/ops/align_to_ops.h> |
1319 | #include <ATen/ops/align_to_native.h> |
1320 | #include <ATen/ops/align_to_ops.h> |
1321 | #include <ATen/ops/view_as_complex_native.h> |
1322 | #include <ATen/ops/view_as_complex_ops.h> |
1323 | #include <ATen/ops/view_as_complex_copy_native.h> |
1324 | #include <ATen/ops/view_as_complex_copy_ops.h> |
1325 | #include <ATen/ops/resolve_neg_native.h> |
1326 | #include <ATen/ops/resolve_neg_ops.h> |
1327 | #include <ATen/ops/linalg_diagonal_native.h> |
1328 | #include <ATen/ops/linalg_diagonal_ops.h> |
1329 | #include <ATen/ops/expand_native.h> |
1330 | #include <ATen/ops/expand_ops.h> |
1331 | #include <ATen/ops/expand_copy_native.h> |
1332 | #include <ATen/ops/expand_copy_ops.h> |
1333 | #include <ATen/ops/matrix_H_native.h> |
1334 | #include <ATen/ops/matrix_H_ops.h> |
1335 | #include <ATen/ops/adjoint_native.h> |
1336 | #include <ATen/ops/adjoint_ops.h> |
1337 | #include <ATen/ops/reshape_as_native.h> |
1338 | #include <ATen/ops/reshape_as_ops.h> |
1339 | #include <ATen/ops/transpose_native.h> |
1340 | #include <ATen/ops/transpose_ops.h> |
1341 | #include <ATen/ops/transpose_copy_native.h> |
1342 | #include <ATen/ops/transpose_copy_ops.h> |
1343 | #include <ATen/ops/transpose_native.h> |
1344 | #include <ATen/ops/transpose_ops.h> |
1345 | #include <ATen/ops/_nested_view_from_buffer_native.h> |
1346 | #include <ATen/ops/_nested_view_from_buffer_ops.h> |
1347 | #include <ATen/ops/_nested_view_from_buffer_copy_native.h> |
1348 | #include <ATen/ops/_nested_view_from_buffer_copy_ops.h> |
1349 | #include <ATen/ops/unsqueeze_native.h> |
1350 | #include <ATen/ops/unsqueeze_ops.h> |
1351 | #include <ATen/ops/unsqueeze_copy_native.h> |
1352 | #include <ATen/ops/unsqueeze_copy_ops.h> |
1353 | #include <ATen/ops/_values_native.h> |
1354 | #include <ATen/ops/_values_ops.h> |
1355 | #include <ATen/ops/_values_copy_native.h> |
1356 | #include <ATen/ops/_values_copy_ops.h> |
1357 | #include <ATen/ops/ccol_indices_native.h> |
1358 | #include <ATen/ops/ccol_indices_ops.h> |
1359 | #include <ATen/ops/ccol_indices_copy_native.h> |
1360 | #include <ATen/ops/ccol_indices_copy_ops.h> |
1361 | #include <ATen/ops/_autocast_to_full_precision_native.h> |
1362 | #include <ATen/ops/_autocast_to_full_precision_ops.h> |
1363 | #include <ATen/ops/to_native.h> |
1364 | #include <ATen/ops/to_ops.h> |
1365 | #include <ATen/ops/to_native.h> |
1366 | #include <ATen/ops/to_ops.h> |
1367 | #include <ATen/ops/to_native.h> |
1368 | #include <ATen/ops/to_ops.h> |
1369 | #include <ATen/ops/to_native.h> |
1370 | #include <ATen/ops/to_ops.h> |
1371 | #include <ATen/ops/view_native.h> |
1372 | #include <ATen/ops/view_ops.h> |
1373 | #include <ATen/ops/view_copy_native.h> |
1374 | #include <ATen/ops/view_copy_ops.h> |
1375 | #include <ATen/ops/view_native.h> |
1376 | #include <ATen/ops/view_ops.h> |
1377 | #include <ATen/ops/view_copy_native.h> |
1378 | #include <ATen/ops/view_copy_ops.h> |
1379 | #include <ATen/ops/_cast_Long_native.h> |
1380 | #include <ATen/ops/_cast_Long_ops.h> |
1381 | #include <ATen/ops/_version_native.h> |
1382 | #include <ATen/ops/_version_ops.h> |
1383 | #include <ATen/ops/retain_grad_native.h> |
1384 | #include <ATen/ops/retain_grad_ops.h> |
1385 | #include <ATen/ops/retains_grad_native.h> |
1386 | #include <ATen/ops/retains_grad_ops.h> |
1387 | #include <ATen/ops/_has_same_storage_numel_native.h> |
1388 | #include <ATen/ops/_has_same_storage_numel_ops.h> |
1389 | #include <ATen/ops/align_tensors_native.h> |
1390 | #include <ATen/ops/align_tensors_ops.h> |
1391 | #include <ATen/ops/_assert_tensor_metadata_native.h> |
1392 | #include <ATen/ops/_assert_tensor_metadata_ops.h> |
1393 | #include <ATen/ops/_debug_has_internal_overlap_native.h> |
1394 | #include <ATen/ops/_debug_has_internal_overlap_ops.h> |
1395 | #include <ATen/ops/_sobol_engine_draw_native.h> |
1396 | #include <ATen/ops/_sobol_engine_draw_ops.h> |
1397 | #include <ATen/ops/_sobol_engine_scramble_native.h> |
1398 | #include <ATen/ops/_sobol_engine_scramble_ops.h> |
1399 | #include <ATen/ops/feature_dropout_native.h> |
1400 | #include <ATen/ops/feature_dropout_ops.h> |
1401 | #include <ATen/ops/feature_dropout_native.h> |
1402 | #include <ATen/ops/feature_dropout_ops.h> |
1403 | #include <ATen/ops/alpha_dropout_native.h> |
1404 | #include <ATen/ops/alpha_dropout_ops.h> |
1405 | #include <ATen/ops/alpha_dropout_native.h> |
1406 | #include <ATen/ops/alpha_dropout_ops.h> |
1407 | #include <ATen/ops/chalf_native.h> |
1408 | #include <ATen/ops/chalf_ops.h> |
1409 | #include <ATen/ops/avg_pool1d_native.h> |
1410 | #include <ATen/ops/avg_pool1d_ops.h> |
1411 | #include <ATen/ops/adaptive_avg_pool1d_native.h> |
1412 | #include <ATen/ops/adaptive_avg_pool1d_ops.h> |
1413 | #include <ATen/ops/affine_grid_generator_backward_native.h> |
1414 | #include <ATen/ops/affine_grid_generator_backward_ops.h> |
1415 | #include <ATen/ops/_test_check_tensor_native.h> |
1416 | #include <ATen/ops/_test_check_tensor_ops.h> |
1417 | #include <ATen/ops/atleast_3d_native.h> |
1418 | #include <ATen/ops/atleast_3d_ops.h> |
1419 | #include <ATen/ops/atleast_3d_native.h> |
1420 | #include <ATen/ops/atleast_3d_ops.h> |
1421 | #include <ATen/ops/batch_norm_native.h> |
1422 | #include <ATen/ops/batch_norm_ops.h> |
1423 | #include <ATen/ops/broadcast_tensors_native.h> |
1424 | #include <ATen/ops/broadcast_tensors_ops.h> |
1425 | #include <ATen/ops/_convolution_mode_native.h> |
1426 | #include <ATen/ops/_convolution_mode_ops.h> |
1427 | #include <ATen/ops/conv3d_native.h> |
1428 | #include <ATen/ops/conv3d_ops.h> |
1429 | #include <ATen/ops/conv3d_native.h> |
1430 | #include <ATen/ops/conv3d_ops.h> |
1431 | #include <ATen/ops/_cummax_helper_native.h> |
1432 | #include <ATen/ops/_cummax_helper_ops.h> |
1433 | #include <ATen/ops/_ctc_loss_backward_native.h> |
1434 | #include <ATen/ops/_ctc_loss_backward_ops.h> |
1435 | #include <ATen/ops/diagflat_native.h> |
1436 | #include <ATen/ops/diagflat_ops.h> |
1437 | #include <ATen/ops/fill_diagonal_native.h> |
1438 | #include <ATen/ops/fill_diagonal_ops.h> |
1439 | #include <ATen/ops/index_copy_native.h> |
1440 | #include <ATen/ops/index_copy_ops.h> |
1441 | #include <ATen/ops/index_copy_native.h> |
1442 | #include <ATen/ops/index_copy_ops.h> |
1443 | #include <ATen/ops/instance_norm_native.h> |
1444 | #include <ATen/ops/instance_norm_ops.h> |
1445 | #include <ATen/ops/is_complex_native.h> |
1446 | #include <ATen/ops/is_complex_ops.h> |
1447 | #include <ATen/ops/fbgemm_linear_int8_weight_fp32_activation_native.h> |
1448 | #include <ATen/ops/fbgemm_linear_int8_weight_fp32_activation_ops.h> |
1449 | #include <ATen/ops/fbgemm_linear_int8_weight_native.h> |
1450 | #include <ATen/ops/fbgemm_linear_int8_weight_ops.h> |
1451 | #include <ATen/ops/fbgemm_linear_quantize_weight_native.h> |
1452 | #include <ATen/ops/fbgemm_linear_quantize_weight_ops.h> |
1453 | #include <ATen/ops/fbgemm_linear_fp16_weight_native.h> |
1454 | #include <ATen/ops/fbgemm_linear_fp16_weight_ops.h> |
1455 | #include <ATen/ops/log_softmax_native.h> |
1456 | #include <ATen/ops/log_softmax_ops.h> |
1457 | #include <ATen/ops/max_pool1d_with_indices_native.h> |
1458 | #include <ATen/ops/max_pool1d_with_indices_ops.h> |
1459 | #include <ATen/ops/max_pool3d_native.h> |
1460 | #include <ATen/ops/max_pool3d_ops.h> |
1461 | #include <ATen/ops/mean_native.h> |
1462 | #include <ATen/ops/mean_ops.h> |
1463 | #include <ATen/ops/multiply_native.h> |
1464 | #include <ATen/ops/multiply_ops.h> |
1465 | #include <ATen/ops/multiply_native.h> |
1466 | #include <ATen/ops/multiply_ops.h> |
1467 | #include <ATen/ops/is_vulkan_available_native.h> |
1468 | #include <ATen/ops/is_vulkan_available_ops.h> |
1469 | #include <ATen/ops/cdist_native.h> |
1470 | #include <ATen/ops/cdist_ops.h> |
1471 | #include <ATen/ops/is_pinned_native.h> |
1472 | #include <ATen/ops/is_pinned_ops.h> |
1473 | #include <ATen/ops/poisson_nll_loss_native.h> |
1474 | #include <ATen/ops/poisson_nll_loss_ops.h> |
1475 | #include <ATen/ops/repeat_interleave_native.h> |
1476 | #include <ATen/ops/repeat_interleave_ops.h> |
1477 | #include <ATen/ops/repeat_interleave_native.h> |
1478 | #include <ATen/ops/repeat_interleave_ops.h> |
1479 | #include <ATen/ops/_prelu_kernel_native.h> |
1480 | #include <ATen/ops/_prelu_kernel_ops.h> |
1481 | #include <ATen/ops/infinitely_differentiable_gelu_backward_native.h> |
1482 | #include <ATen/ops/infinitely_differentiable_gelu_backward_ops.h> |
1483 | #include <ATen/ops/mish_backward_native.h> |
1484 | #include <ATen/ops/mish_backward_ops.h> |
1485 | #include <ATen/ops/_nested_tensor_offsets_native.h> |
1486 | #include <ATen/ops/_nested_tensor_offsets_ops.h> |
1487 | #include <ATen/ops/subtract_native.h> |
1488 | #include <ATen/ops/subtract_ops.h> |
1489 | #include <ATen/ops/subtract_native.h> |
1490 | #include <ATen/ops/subtract_ops.h> |
1491 | #include <ATen/ops/_sparse_mm_reduce_impl_backward_native.h> |
1492 | #include <ATen/ops/_sparse_mm_reduce_impl_backward_ops.h> |
1493 | #include <ATen/ops/sparse_compressed_tensor_native.h> |
1494 | #include <ATen/ops/sparse_compressed_tensor_ops.h> |
1495 | #include <ATen/ops/sparse_bsr_tensor_native.h> |
1496 | #include <ATen/ops/sparse_bsr_tensor_ops.h> |
1497 | #include <ATen/ops/sparse_compressed_tensor_native.h> |
1498 | #include <ATen/ops/sparse_compressed_tensor_ops.h> |
1499 | #include <ATen/ops/sparse_bsr_tensor_native.h> |
1500 | #include <ATen/ops/sparse_bsr_tensor_ops.h> |
1501 | #include <ATen/ops/_sparse_csc_tensor_unsafe_native.h> |
1502 | #include <ATen/ops/_sparse_csc_tensor_unsafe_ops.h> |
1503 | #include <ATen/ops/_sparse_coo_tensor_unsafe_native.h> |
1504 | #include <ATen/ops/_sparse_coo_tensor_unsafe_ops.h> |
1505 | #include <ATen/ops/_validate_sparse_coo_tensor_args_native.h> |
1506 | #include <ATen/ops/_validate_sparse_coo_tensor_args_ops.h> |
1507 | #include <ATen/ops/to_dense_native.h> |
1508 | #include <ATen/ops/to_dense_ops.h> |
1509 | #include <ATen/ops/to_dense_backward_native.h> |
1510 | #include <ATen/ops/to_dense_backward_ops.h> |
1511 | #include <ATen/ops/_nnz_native.h> |
1512 | #include <ATen/ops/_nnz_ops.h> |
1513 | #include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_native.h> |
1514 | #include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_ops.h> |
1515 | #include <ATen/ops/fake_quantize_per_channel_affine_native.h> |
1516 | #include <ATen/ops/fake_quantize_per_channel_affine_ops.h> |
1517 | #include <ATen/ops/_fake_quantize_learnable_per_channel_affine_backward_native.h> |
1518 | #include <ATen/ops/_fake_quantize_learnable_per_channel_affine_backward_ops.h> |
1519 | #include <ATen/ops/choose_qparams_optimized_native.h> |
1520 | #include <ATen/ops/choose_qparams_optimized_ops.h> |
1521 | #include <ATen/ops/combinations_native.h> |
1522 | #include <ATen/ops/combinations_ops.h> |
1523 | #include <ATen/ops/result_type_native.h> |
1524 | #include <ATen/ops/result_type_ops.h> |
1525 | #include <ATen/ops/result_type_native.h> |
1526 | #include <ATen/ops/result_type_ops.h> |
1527 | #include <ATen/ops/result_type_native.h> |
1528 | #include <ATen/ops/result_type_ops.h> |
1529 | #include <ATen/ops/result_type_native.h> |
1530 | #include <ATen/ops/result_type_ops.h> |
1531 | #include <ATen/ops/_thnn_differentiable_gru_cell_backward_native.h> |
1532 | #include <ATen/ops/_thnn_differentiable_gru_cell_backward_ops.h> |
1533 | #include <ATen/ops/lstm_native.h> |
1534 | #include <ATen/ops/lstm_ops.h> |
1535 | #include <ATen/ops/lstm_native.h> |
1536 | #include <ATen/ops/lstm_ops.h> |
1537 | #include <ATen/ops/gru_native.h> |
1538 | #include <ATen/ops/gru_ops.h> |
1539 | #include <ATen/ops/gru_native.h> |
1540 | #include <ATen/ops/gru_ops.h> |
1541 | #include <ATen/ops/lstm_cell_native.h> |
1542 | #include <ATen/ops/lstm_cell_ops.h> |
1543 | #include <ATen/ops/quantized_gru_cell_native.h> |
1544 | #include <ATen/ops/quantized_gru_cell_ops.h> |
1545 | #include <ATen/ops/quantized_rnn_tanh_cell_native.h> |
1546 | #include <ATen/ops/quantized_rnn_tanh_cell_ops.h> |
1547 | #include <ATen/ops/index_add_native.h> |
1548 | #include <ATen/ops/index_add_ops.h> |
1549 | #include <ATen/ops/index_fill_native.h> |
1550 | #include <ATen/ops/index_fill_ops.h> |
1551 | #include <ATen/ops/index_fill_native.h> |
1552 | #include <ATen/ops/index_fill_ops.h> |
1553 | #include <ATen/ops/index_fill_native.h> |
1554 | #include <ATen/ops/index_fill_ops.h> |
1555 | #include <ATen/ops/index_fill_native.h> |
1556 | #include <ATen/ops/index_fill_ops.h> |
1557 | #include <ATen/ops/scatter_native.h> |
1558 | #include <ATen/ops/scatter_ops.h> |
1559 | #include <ATen/ops/scatter_native.h> |
1560 | #include <ATen/ops/scatter_ops.h> |
1561 | #include <ATen/ops/scatter_add_native.h> |
1562 | #include <ATen/ops/scatter_add_ops.h> |
1563 | #include <ATen/ops/and_native.h> |
1564 | #include <ATen/ops/and_ops.h> |
1565 | #include <ATen/ops/and_native.h> |
1566 | #include <ATen/ops/and_ops.h> |
1567 | #include <ATen/ops/and_native.h> |
1568 | #include <ATen/ops/and_ops.h> |
1569 | #include <ATen/ops/and_native.h> |
1570 | #include <ATen/ops/and_ops.h> |
1571 | #include <ATen/ops/or_native.h> |
1572 | #include <ATen/ops/or_ops.h> |
1573 | #include <ATen/ops/or_native.h> |
1574 | #include <ATen/ops/or_ops.h> |
1575 | #include <ATen/ops/or_native.h> |
1576 | #include <ATen/ops/or_ops.h> |
1577 | #include <ATen/ops/or_native.h> |
1578 | #include <ATen/ops/or_ops.h> |
1579 | #include <ATen/ops/index_select_backward_native.h> |
1580 | #include <ATen/ops/index_select_backward_ops.h> |
1581 | #include <ATen/ops/nonzero_numpy_native.h> |
1582 | #include <ATen/ops/nonzero_numpy_ops.h> |
1583 | #include <ATen/ops/_linalg_check_errors_native.h> |
1584 | #include <ATen/ops/_linalg_check_errors_ops.h> |
1585 | #include <ATen/ops/_lu_with_info_native.h> |
1586 | #include <ATen/ops/_lu_with_info_ops.h> |
1587 | #include <ATen/ops/polygamma_native.h> |
1588 | #include <ATen/ops/polygamma_ops.h> |
1589 | #include <ATen/ops/nll_loss_nd_native.h> |
1590 | #include <ATen/ops/nll_loss_nd_ops.h> |
1591 | #include <ATen/ops/upsample_linear1d_native.h> |
1592 | #include <ATen/ops/upsample_linear1d_ops.h> |
1593 | #include <ATen/ops/upsample_bicubic2d_native.h> |
1594 | #include <ATen/ops/upsample_bicubic2d_ops.h> |
1595 | #include <ATen/ops/upsample_nearest1d_native.h> |
1596 | #include <ATen/ops/upsample_nearest1d_ops.h> |
1597 | #include <ATen/ops/_upsample_nearest_exact1d_native.h> |
1598 | #include <ATen/ops/_upsample_nearest_exact1d_ops.h> |
1599 | #include <ATen/ops/upsample_nearest3d_native.h> |
1600 | #include <ATen/ops/upsample_nearest3d_ops.h> |
1601 | #include <ATen/ops/det_native.h> |
1602 | #include <ATen/ops/det_ops.h> |
1603 | #include <ATen/ops/logdet_native.h> |
1604 | #include <ATen/ops/logdet_ops.h> |
1605 | #include <ATen/ops/_test_string_default_native.h> |
1606 | #include <ATen/ops/_test_string_default_ops.h> |
1607 | #include <ATen/ops/_scaled_dot_product_flash_attention_backward_native.h> |
1608 | #include <ATen/ops/_scaled_dot_product_flash_attention_backward_ops.h> |
1609 | #include <ATen/ops/_flash_attention_forward_native.h> |
1610 | #include <ATen/ops/_flash_attention_forward_ops.h> |
1611 | #include <ATen/ops/_efficient_attention_backward_native.h> |
1612 | #include <ATen/ops/_efficient_attention_backward_ops.h> |
1613 | #endif |
1614 | |
1615 | namespace at { |
1616 | namespace functionalization { |
1617 | |
1618 | // This keyset is used by functionalization when it calls into meta kernels |
1619 | // to accurately propagate stride metadata. |
// Exclude any modes: calling into meta kernels is purely an implementation
// detail of shape inference, and we don't want any mode keys to run during it.
// Specifically, we want to prevent functionalization and Python modes from running.
1623 | constexpr auto exclude_keys_for_meta_dispatch = |
1624 | c10::functorch_transforms_ks | |
1625 | c10::DispatchKeySet({ |
1626 | c10::DispatchKey::FuncTorchDynamicLayerBackMode, |
1627 | c10::DispatchKey::FuncTorchDynamicLayerFrontMode, |
1628 | c10::DispatchKey::Python |
1629 | }); |
1630 | |
1631 | |
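// to_meta() mirrors a tensor onto the meta device: the result has the same
// (symbolic) sizes, strides, dtype, and layout, but no real storage, so running
// an op on it performs shape/stride inference without doing any actual compute.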
1632 | inline Tensor to_meta(const Tensor& t) { |
1633 | if (!t.defined()) return t; |
1634 | return at::native::empty_strided_meta_symint(t.sym_sizes(), t.sym_strides(), |
1635 | /*dtype=*/c10::make_optional(t.scalar_type()), /*layout=*/c10::make_optional(t.layout()), |
1636 | /*device=*/c10::make_optional(c10::Device(kMeta)), /*pin_memory=*/c10::nullopt); |
1637 | } |
1638 | |
1639 | inline c10::optional<Tensor> to_meta(const c10::optional<Tensor>& t) { |
1640 | if (t.has_value()) { |
1641 | return c10::make_optional<Tensor>(to_meta(*t)); |
1642 | } |
1643 | return c10::nullopt; |
1644 | } |
1645 | |
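// The overloads below lift to_meta() elementwise over tensor lists.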
1646 | inline std::vector<Tensor> to_meta(at::ITensorListRef t_list) { |
1647 | std::vector<Tensor> outputs; |
1648 | outputs.reserve(t_list.size()); |
1649 | for (const auto& tensor : t_list) { |
1650 | outputs.push_back(to_meta(tensor)); |
1651 | } |
1652 | return outputs; |
1653 | } |
1654 | |
1655 | inline c10::List<Tensor> to_meta(const c10::List<Tensor>& t_list) { |
1656 | c10::List<Tensor> outputs; |
1657 | outputs.reserve(t_list.size()); |
1658 | for (const auto i : c10::irange(t_list.size())) { |
1659 | outputs.push_back(to_meta(t_list[i])); |
1660 | } |
1661 | return outputs; |
1662 | } |
1663 | |
1664 | inline c10::List<c10::optional<Tensor>> to_meta(const c10::List<c10::optional<Tensor>>& t_list) { |
1665 | c10::List<c10::optional<Tensor>> outputs; |
1666 | outputs.reserve(t_list.size()); |
1667 | for (const auto i : c10::irange(t_list.size())) { |
1668 | outputs.push_back(to_meta(t_list[i])); |
1669 | } |
1670 | return outputs; |
1671 | } |
1672 | |
1673 | |
1674 | |
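// Every kernel below follows the same generated pattern for a mutable (out= or
// inplace) op:
//   1. optionally re-run the op on meta tensors first, to surface shape errors early;
//   2. sync() and unwrap each functional-tensor argument;
//   3. if the mutated argument is not a functional tensor, redispatch to the original
//      mutable op (asserting that no *input* is a functional tensor in that case);
//   4. otherwise, run the op's functional variant under AutoDispatchSkipFunctionalize
//      and write the result back into the mutated argument via
//      replace_() + commit_update() + sync().
// As a rough sketch (illustrative only, not generated code), functionalizing an
// out= op such as asinh(self, out) behaves like:
//   at::Tensor tmp = at::asinh(self);  // functional variant
//   /* out's functional wrapper is then pointed at tmp via
//      replace_ / commit_update / sync */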
1675 | at::Tensor & _masked_scale_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, double scale, at::Tensor & out) { |
1676 | if (false) { |
1677 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
      // This helps us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
      // (We can only do this for inplace ops today, because they technically all support meta tensors.)
1680 | auto self_meta = to_meta(self); |
1681 | auto mask_meta = to_meta(mask); |
1682 | auto out_meta = to_meta(out); |
1683 | at::AutoDispatchSkipFunctionalize func_guard; |
1684 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
1685 | at::_ops::_masked_scale_out::call(self_meta, mask_meta, scale, out_meta); |
1686 | } |
1687 | |
1688 | at::Tensor self_; |
1689 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
1690 | at::functionalization::impl::sync(self); |
1691 | self_ = at::functionalization::impl::from_functional_tensor(self); |
1692 | } else { |
1693 | self_ = self; |
1694 | } |
1695 | |
1696 | at::Tensor mask_; |
1697 | if (at::functionalization::impl::isFunctionalTensor(mask)) { |
1698 | at::functionalization::impl::sync(mask); |
1699 | mask_ = at::functionalization::impl::from_functional_tensor(mask); |
1700 | } else { |
1701 | mask_ = mask; |
1702 | } |
1703 | |
1704 | at::Tensor out_; |
1705 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
1706 | at::functionalization::impl::sync(out); |
1707 | out_ = at::functionalization::impl::from_functional_tensor(out); |
1708 | } else { |
1709 | out_ = out; |
1710 | } |
1711 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
1712 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mask))) { |
        // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
        TORCH_INTERNAL_ASSERT(false,
          "mutating a non-functional tensor with a functional tensor is not allowed.",
          " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
      } else {
        // case 2: arguments are not functional tensors, so we no-op and redispatch.
        at::AutoDispatchSkipFunctionalize guard;
        at::Tensor tmp_output = at::_ops::_masked_scale_out::call(self_, mask_, scale, out_);
        return out;
1722 | } |
1723 | } else { |
1724 | at::Tensor tmp_output; |
1725 | { |
1726 | at::AutoDispatchSkipFunctionalize guard; |
1727 | tmp_output = at::_ops::_masked_scale::call(self_, mask_, scale); |
1728 | } |
1729 | at::functionalization::impl::replace_(out, tmp_output); |
1730 | at::functionalization::impl::commit_update(out); |
1731 | at::functionalization::impl::sync(out); |
1732 | return out; |
1733 | } |
1734 | } |
1735 | |
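// native_dropout produces two outputs; the pattern is identical, with the
// replace_/commit_update/sync sequence applied to each output tensor in turn.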
1736 | ::std::tuple<at::Tensor &,at::Tensor &> native_dropout_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double p, c10::optional<bool> train, at::Tensor & out0, at::Tensor & out1) { |
1737 | if (false) { |
1738 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
      // This helps us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
      // (We can only do this for inplace ops today, because they technically all support meta tensors.)
1741 | auto input_meta = to_meta(input); |
1742 | auto out0_meta = to_meta(out0); |
1743 | auto out1_meta = to_meta(out1); |
1744 | at::AutoDispatchSkipFunctionalize func_guard; |
1745 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
1746 | at::_ops::native_dropout_out::call(input_meta, p, train, out0_meta, out1_meta); |
1747 | } |
1748 | |
1749 | at::Tensor input_; |
1750 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
1751 | at::functionalization::impl::sync(input); |
1752 | input_ = at::functionalization::impl::from_functional_tensor(input); |
1753 | } else { |
1754 | input_ = input; |
1755 | } |
1756 | |
1757 | at::Tensor out0_; |
1758 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
1759 | at::functionalization::impl::sync(out0); |
1760 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
1761 | } else { |
1762 | out0_ = out0; |
1763 | } |
1764 | |
1765 | at::Tensor out1_; |
1766 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
1767 | at::functionalization::impl::sync(out1); |
1768 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
1769 | } else { |
1770 | out1_ = out1; |
1771 | } |
1772 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) { |
1773 | if ((false || at::functionalization::impl::isFunctionalTensor(input))) { |
        // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
        TORCH_INTERNAL_ASSERT(false,
          "mutating a non-functional tensor with a functional tensor is not allowed.",
          " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
      } else {
        // case 2: arguments are not functional tensors, so we no-op and redispatch.
        at::AutoDispatchSkipFunctionalize guard;
        ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::native_dropout_out::call(input_, p, train, out0_, out1_);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
1783 | } |
1784 | } else { |
1785 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
1786 | { |
1787 | at::AutoDispatchSkipFunctionalize guard; |
1788 | tmp_output = at::_ops::native_dropout::call(input_, p, train); |
1789 | } |
1790 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
1791 | at::functionalization::impl::commit_update(out0); |
1792 | at::functionalization::impl::sync(out0); |
1793 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
1794 | at::functionalization::impl::commit_update(out1); |
1795 | at::functionalization::impl::sync(out1); |
1796 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
1797 | } |
1798 | } |
1799 | |
1800 | at::Tensor & native_dropout_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & mask, double scale, at::Tensor & out) { |
1801 | if (false) { |
1802 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
      // This helps us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
      // (We can only do this for inplace ops today, because they technically all support meta tensors.)
1805 | auto grad_output_meta = to_meta(grad_output); |
1806 | auto mask_meta = to_meta(mask); |
1807 | auto out_meta = to_meta(out); |
1808 | at::AutoDispatchSkipFunctionalize func_guard; |
1809 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
1810 | at::_ops::native_dropout_backward_out::call(grad_output_meta, mask_meta, scale, out_meta); |
1811 | } |
1812 | |
1813 | at::Tensor grad_output_; |
1814 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
1815 | at::functionalization::impl::sync(grad_output); |
1816 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
1817 | } else { |
1818 | grad_output_ = grad_output; |
1819 | } |
1820 | |
1821 | at::Tensor mask_; |
1822 | if (at::functionalization::impl::isFunctionalTensor(mask)) { |
1823 | at::functionalization::impl::sync(mask); |
1824 | mask_ = at::functionalization::impl::from_functional_tensor(mask); |
1825 | } else { |
1826 | mask_ = mask; |
1827 | } |
1828 | |
1829 | at::Tensor out_; |
1830 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
1831 | at::functionalization::impl::sync(out); |
1832 | out_ = at::functionalization::impl::from_functional_tensor(out); |
1833 | } else { |
1834 | out_ = out; |
1835 | } |
1836 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
1837 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(mask))) { |
        // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
        TORCH_INTERNAL_ASSERT(false,
          "mutating a non-functional tensor with a functional tensor is not allowed.",
          " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
      } else {
        // case 2: arguments are not functional tensors, so we no-op and redispatch.
        at::AutoDispatchSkipFunctionalize guard;
        at::Tensor tmp_output = at::_ops::native_dropout_backward_out::call(grad_output_, mask_, scale, out_);
        return out;
1847 | } |
1848 | } else { |
1849 | at::Tensor tmp_output; |
1850 | { |
1851 | at::AutoDispatchSkipFunctionalize guard; |
1852 | tmp_output = at::_ops::native_dropout_backward::call(grad_output_, mask_, scale); |
1853 | } |
1854 | at::functionalization::impl::replace_(out, tmp_output); |
1855 | at::functionalization::impl::commit_update(out); |
1856 | at::functionalization::impl::sync(out); |
1857 | return out; |
1858 | } |
1859 | } |
1860 | |
1861 | at::Tensor & asinh_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
1862 | if (false) { |
1863 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
      // This helps us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
      // (We can only do this for inplace ops today, because they technically all support meta tensors.)
1866 | auto self_meta = to_meta(self); |
1867 | auto out_meta = to_meta(out); |
1868 | at::AutoDispatchSkipFunctionalize func_guard; |
1869 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
1870 | at::_ops::asinh_out::call(self_meta, out_meta); |
1871 | } |
1872 | |
1873 | at::Tensor self_; |
1874 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
1875 | at::functionalization::impl::sync(self); |
1876 | self_ = at::functionalization::impl::from_functional_tensor(self); |
1877 | } else { |
1878 | self_ = self; |
1879 | } |
1880 | |
1881 | at::Tensor out_; |
1882 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
1883 | at::functionalization::impl::sync(out); |
1884 | out_ = at::functionalization::impl::from_functional_tensor(out); |
1885 | } else { |
1886 | out_ = out; |
1887 | } |
1888 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
1889 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
        // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
        TORCH_INTERNAL_ASSERT(false,
          "mutating a non-functional tensor with a functional tensor is not allowed.",
          " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
      } else {
        // case 2: arguments are not functional tensors, so we no-op and redispatch.
        at::AutoDispatchSkipFunctionalize guard;
        at::Tensor tmp_output = at::_ops::asinh_out::call(self_, out_);
        return out;
1899 | } |
1900 | } else { |
1901 | at::Tensor tmp_output; |
1902 | { |
1903 | at::AutoDispatchSkipFunctionalize guard; |
1904 | tmp_output = at::_ops::asinh::call(self_); |
1905 | } |
1906 | at::functionalization::impl::replace_(out, tmp_output); |
1907 | at::functionalization::impl::commit_update(out); |
1908 | at::functionalization::impl::sync(out); |
1909 | return out; |
1910 | } |
1911 | } |
1912 | |
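// Inplace variants like asinh_ run the meta-tensor pre-check above under
// `if (true)`, unlike the out= kernels' `if (false)`: inplace ops all technically
// support meta tensors (see the comment inside each kernel), so the early shape
// check can run unconditionally for them.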
1913 | at::Tensor & asinh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
1914 | if (true) { |
1915 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
      // This helps us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
      // (We can only do this for inplace ops today, because they technically all support meta tensors.)
1918 | auto self_meta = to_meta(self); |
1919 | at::AutoDispatchSkipFunctionalize func_guard; |
1920 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
1921 | at::_ops::asinh_::call(self_meta); |
1922 | } |
1923 | |
1924 | at::Tensor self_; |
1925 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
1926 | at::functionalization::impl::sync(self); |
1927 | self_ = at::functionalization::impl::from_functional_tensor(self); |
1928 | } else { |
1929 | self_ = self; |
1930 | } |
1931 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
1932 | if ((false)) { |
        // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
        TORCH_INTERNAL_ASSERT(false,
          "mutating a non-functional tensor with a functional tensor is not allowed.",
          " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
      } else {
        // case 2: arguments are not functional tensors, so we no-op and redispatch.
        at::AutoDispatchSkipFunctionalize guard;
        at::Tensor tmp_output = at::_ops::asinh_::call(self_);
        return self;
1942 | } |
1943 | } else { |
1944 | at::Tensor tmp_output; |
1945 | { |
1946 | at::AutoDispatchSkipFunctionalize guard; |
1947 | tmp_output = at::_ops::asinh::call(self_); |
1948 | } |
1949 | at::functionalization::impl::replace_(self, tmp_output); |
1950 | at::functionalization::impl::commit_update(self); |
1951 | at::functionalization::impl::sync(self); |
1952 | return self; |
1953 | } |
1954 | } |
1955 | |
1956 | at::Tensor & arctanh_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
1957 | if (false) { |
1958 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
      // This helps us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
      // (We can only do this for inplace ops today, because they technically all support meta tensors.)
1961 | auto self_meta = to_meta(self); |
1962 | auto out_meta = to_meta(out); |
1963 | at::AutoDispatchSkipFunctionalize func_guard; |
1964 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
1965 | at::_ops::arctanh_out::call(self_meta, out_meta); |
1966 | } |
1967 | |
1968 | at::Tensor self_; |
1969 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
1970 | at::functionalization::impl::sync(self); |
1971 | self_ = at::functionalization::impl::from_functional_tensor(self); |
1972 | } else { |
1973 | self_ = self; |
1974 | } |
1975 | |
1976 | at::Tensor out_; |
1977 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
1978 | at::functionalization::impl::sync(out); |
1979 | out_ = at::functionalization::impl::from_functional_tensor(out); |
1980 | } else { |
1981 | out_ = out; |
1982 | } |
1983 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
1984 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
        // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
        TORCH_INTERNAL_ASSERT(false,
          "mutating a non-functional tensor with a functional tensor is not allowed.",
          " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
      } else {
        // case 2: arguments are not functional tensors, so we no-op and redispatch.
        at::AutoDispatchSkipFunctionalize guard;
        at::Tensor tmp_output = at::_ops::arctanh_out::call(self_, out_);
        return out;
1994 | } |
1995 | } else { |
1996 | at::Tensor tmp_output; |
1997 | { |
1998 | at::AutoDispatchSkipFunctionalize guard; |
1999 | tmp_output = at::_ops::arctanh::call(self_); |
2000 | } |
2001 | at::functionalization::impl::replace_(out, tmp_output); |
2002 | at::functionalization::impl::commit_update(out); |
2003 | at::functionalization::impl::sync(out); |
2004 | return out; |
2005 | } |
2006 | } |
2007 | |
2008 | at::Tensor & arctanh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
2009 | if (true) { |
2010 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
      // This helps us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
      // (We can only do this for inplace ops today, because they technically all support meta tensors.)
2013 | auto self_meta = to_meta(self); |
2014 | at::AutoDispatchSkipFunctionalize func_guard; |
2015 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2016 | at::_ops::arctanh_::call(self_meta); |
2017 | } |
2018 | |
2019 | at::Tensor self_; |
2020 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2021 | at::functionalization::impl::sync(self); |
2022 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2023 | } else { |
2024 | self_ = self; |
2025 | } |
2026 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
2027 | if ((false)) { |
        // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
        TORCH_INTERNAL_ASSERT(false,
          "mutating a non-functional tensor with a functional tensor is not allowed.",
          " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
      } else {
        // case 2: arguments are not functional tensors, so we no-op and redispatch.
        at::AutoDispatchSkipFunctionalize guard;
        at::Tensor tmp_output = at::_ops::arctanh_::call(self_);
        return self;
2037 | } |
2038 | } else { |
2039 | at::Tensor tmp_output; |
2040 | { |
2041 | at::AutoDispatchSkipFunctionalize guard; |
2042 | tmp_output = at::_ops::arctanh::call(self_); |
2043 | } |
2044 | at::functionalization::impl::replace_(self, tmp_output); |
2045 | at::functionalization::impl::commit_update(self); |
2046 | at::functionalization::impl::sync(self); |
2047 | return self; |
2048 | } |
2049 | } |
2050 | |
2051 | at::Tensor & baddbmm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { |
2052 | if (false) { |
2053 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
      // This helps us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
      // (We can only do this for inplace ops today, because they technically all support meta tensors.)
2056 | auto self_meta = to_meta(self); |
2057 | auto batch1_meta = to_meta(batch1); |
2058 | auto batch2_meta = to_meta(batch2); |
2059 | auto out_meta = to_meta(out); |
2060 | at::AutoDispatchSkipFunctionalize func_guard; |
2061 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2062 | at::_ops::baddbmm_out::call(self_meta, batch1_meta, batch2_meta, beta, alpha, out_meta); |
2063 | } |
2064 | |
2065 | at::Tensor self_; |
2066 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2067 | at::functionalization::impl::sync(self); |
2068 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2069 | } else { |
2070 | self_ = self; |
2071 | } |
2072 | |
2073 | at::Tensor batch1_; |
2074 | if (at::functionalization::impl::isFunctionalTensor(batch1)) { |
2075 | at::functionalization::impl::sync(batch1); |
2076 | batch1_ = at::functionalization::impl::from_functional_tensor(batch1); |
2077 | } else { |
2078 | batch1_ = batch1; |
2079 | } |
2080 | |
2081 | at::Tensor batch2_; |
2082 | if (at::functionalization::impl::isFunctionalTensor(batch2)) { |
2083 | at::functionalization::impl::sync(batch2); |
2084 | batch2_ = at::functionalization::impl::from_functional_tensor(batch2); |
2085 | } else { |
2086 | batch2_ = batch2; |
2087 | } |
2088 | |
2089 | at::Tensor out_; |
2090 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2091 | at::functionalization::impl::sync(out); |
2092 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2093 | } else { |
2094 | out_ = out; |
2095 | } |
2096 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2097 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(batch1) || at::functionalization::impl::isFunctionalTensor(batch2))) { |
        // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
        TORCH_INTERNAL_ASSERT(false,
          "mutating a non-functional tensor with a functional tensor is not allowed.",
          " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
      } else {
        // case 2: arguments are not functional tensors, so we no-op and redispatch.
        at::AutoDispatchSkipFunctionalize guard;
        at::Tensor tmp_output = at::_ops::baddbmm_out::call(self_, batch1_, batch2_, beta, alpha, out_);
        return out;
2107 | } |
2108 | } else { |
2109 | at::Tensor tmp_output; |
2110 | { |
2111 | at::AutoDispatchSkipFunctionalize guard; |
2112 | tmp_output = at::_ops::baddbmm::call(self_, batch1_, batch2_, beta, alpha); |
2113 | } |
2114 | at::functionalization::impl::replace_(out, tmp_output); |
2115 | at::functionalization::impl::commit_update(out); |
2116 | at::functionalization::impl::sync(out); |
2117 | return out; |
2118 | } |
2119 | } |
2120 | |
2121 | at::Tensor & baddbmm_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) { |
2122 | if (true) { |
2123 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
      // This helps us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
      // (We can only do this for inplace ops today, because they technically all support meta tensors.)
2126 | auto self_meta = to_meta(self); |
2127 | auto batch1_meta = to_meta(batch1); |
2128 | auto batch2_meta = to_meta(batch2); |
2129 | at::AutoDispatchSkipFunctionalize func_guard; |
2130 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2131 | at::_ops::baddbmm_::call(self_meta, batch1_meta, batch2_meta, beta, alpha); |
2132 | } |
2133 | |
2134 | at::Tensor self_; |
2135 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2136 | at::functionalization::impl::sync(self); |
2137 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2138 | } else { |
2139 | self_ = self; |
2140 | } |
2141 | |
2142 | at::Tensor batch1_; |
2143 | if (at::functionalization::impl::isFunctionalTensor(batch1)) { |
2144 | at::functionalization::impl::sync(batch1); |
2145 | batch1_ = at::functionalization::impl::from_functional_tensor(batch1); |
2146 | } else { |
2147 | batch1_ = batch1; |
2148 | } |
2149 | |
2150 | at::Tensor batch2_; |
2151 | if (at::functionalization::impl::isFunctionalTensor(batch2)) { |
2152 | at::functionalization::impl::sync(batch2); |
2153 | batch2_ = at::functionalization::impl::from_functional_tensor(batch2); |
2154 | } else { |
2155 | batch2_ = batch2; |
2156 | } |
2157 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
2158 | if ((false || at::functionalization::impl::isFunctionalTensor(batch1) || at::functionalization::impl::isFunctionalTensor(batch2))) { |
        // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
        TORCH_INTERNAL_ASSERT(false,
          "mutating a non-functional tensor with a functional tensor is not allowed.",
          " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
      } else {
        // case 2: arguments are not functional tensors, so we no-op and redispatch.
        at::AutoDispatchSkipFunctionalize guard;
        at::Tensor tmp_output = at::_ops::baddbmm_::call(self_, batch1_, batch2_, beta, alpha);
        return self;
2168 | } |
2169 | } else { |
2170 | at::Tensor tmp_output; |
2171 | { |
2172 | at::AutoDispatchSkipFunctionalize guard; |
2173 | tmp_output = at::_ops::baddbmm::call(self_, batch1_, batch2_, beta, alpha); |
2174 | } |
2175 | at::functionalization::impl::replace_(self, tmp_output); |
2176 | at::functionalization::impl::commit_update(self); |
2177 | at::functionalization::impl::sync(self); |
2178 | return self; |
2179 | } |
2180 | } |
2181 | |
2182 | at::Tensor & quantized_batch_norm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point, at::Tensor & out) { |
2183 | if (false) { |
2184 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
      // This helps us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
      // (We can only do this for inplace ops today, because they technically all support meta tensors.)
2187 | auto input_meta = to_meta(input); |
2188 | auto weight_meta = to_meta(weight); |
2189 | auto bias_meta = to_meta(bias); |
2190 | auto mean_meta = to_meta(mean); |
2191 | auto var_meta = to_meta(var); |
2192 | auto out_meta = to_meta(out); |
2193 | at::AutoDispatchSkipFunctionalize func_guard; |
2194 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2195 | at::_ops::quantized_batch_norm_out::call(input_meta, weight_meta, bias_meta, mean_meta, var_meta, eps, output_scale, output_zero_point, out_meta); |
2196 | } |
2197 | |
2198 | at::Tensor input_; |
2199 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
2200 | at::functionalization::impl::sync(input); |
2201 | input_ = at::functionalization::impl::from_functional_tensor(input); |
2202 | } else { |
2203 | input_ = input; |
2204 | } |
2205 | |
2206 | c10::optional<at::Tensor> weight_; |
2207 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
2208 | at::functionalization::impl::sync(weight); |
2209 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
2210 | } else { |
2211 | weight_ = weight; |
2212 | } |
2213 | |
2214 | c10::optional<at::Tensor> bias_; |
2215 | if (at::functionalization::impl::isFunctionalTensor(bias)) { |
2216 | at::functionalization::impl::sync(bias); |
2217 | bias_ = at::functionalization::impl::from_functional_tensor(bias); |
2218 | } else { |
2219 | bias_ = bias; |
2220 | } |
2221 | |
2222 | at::Tensor mean_; |
2223 | if (at::functionalization::impl::isFunctionalTensor(mean)) { |
2224 | at::functionalization::impl::sync(mean); |
2225 | mean_ = at::functionalization::impl::from_functional_tensor(mean); |
2226 | } else { |
2227 | mean_ = mean; |
2228 | } |
2229 | |
2230 | at::Tensor var_; |
2231 | if (at::functionalization::impl::isFunctionalTensor(var)) { |
2232 | at::functionalization::impl::sync(var); |
2233 | var_ = at::functionalization::impl::from_functional_tensor(var); |
2234 | } else { |
2235 | var_ = var; |
2236 | } |
2237 | |
2238 | at::Tensor out_; |
2239 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2240 | at::functionalization::impl::sync(out); |
2241 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2242 | } else { |
2243 | out_ = out; |
2244 | } |
2245 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2246 | if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias) || at::functionalization::impl::isFunctionalTensor(mean) || at::functionalization::impl::isFunctionalTensor(var))) { |
        // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
        TORCH_INTERNAL_ASSERT(false,
          "mutating a non-functional tensor with a functional tensor is not allowed.",
          " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
      } else {
        // case 2: arguments are not functional tensors, so we no-op and redispatch.
        at::AutoDispatchSkipFunctionalize guard;
        at::Tensor tmp_output = at::_ops::quantized_batch_norm_out::call(input_, weight_, bias_, mean_, var_, eps, output_scale, output_zero_point, out_);
        return out;
2256 | } |
2257 | } else { |
2258 | at::Tensor tmp_output; |
2259 | { |
2260 | at::AutoDispatchSkipFunctionalize guard; |
2261 | tmp_output = at::_ops::quantized_batch_norm::call(input_, weight_, bias_, mean_, var_, eps, output_scale, output_zero_point); |
2262 | } |
2263 | at::functionalization::impl::replace_(out, tmp_output); |
2264 | at::functionalization::impl::commit_update(out); |
2265 | at::functionalization::impl::sync(out); |
2266 | return out; |
2267 | } |
2268 | } |
2269 | |
2270 | at::Tensor & bernoulli_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) { |
2271 | if (false) { |
2272 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
      // This helps us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
      // (We can only do this for inplace ops today, because they technically all support meta tensors.)
2275 | auto self_meta = to_meta(self); |
2276 | auto out_meta = to_meta(out); |
2277 | at::AutoDispatchSkipFunctionalize func_guard; |
2278 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2279 | at::_ops::bernoulli_out::call(self_meta, generator, out_meta); |
2280 | } |
2281 | |
2282 | at::Tensor self_; |
2283 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2284 | at::functionalization::impl::sync(self); |
2285 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2286 | } else { |
2287 | self_ = self; |
2288 | } |
2289 | |
2290 | at::Tensor out_; |
2291 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2292 | at::functionalization::impl::sync(out); |
2293 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2294 | } else { |
2295 | out_ = out; |
2296 | } |
2297 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2298 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2303 | } else { |
2304 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2305 | at::AutoDispatchSkipFunctionalize guard; |
2306 | at::Tensor tmp_output = at::_ops::bernoulli_out::call(self_, generator, out_); |
      return out;
2308 | } |
2309 | } else { |
2310 | at::Tensor tmp_output; |
2311 | { |
2312 | at::AutoDispatchSkipFunctionalize guard; |
2313 | tmp_output = at::_ops::bernoulli::call(self_, generator); |
2314 | } |
2315 | at::functionalization::impl::replace_(out, tmp_output); |
2316 | at::functionalization::impl::commit_update(out); |
2317 | at::functionalization::impl::sync(out); |
2318 | return out; |
2319 | } |
2320 | } |
2321 | |
2322 | at::Tensor & bernoulli_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & p, c10::optional<at::Generator> generator, at::Tensor & out) { |
2323 | if (false) { |
2324 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2325 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2327 | auto self_meta = to_meta(self); |
2328 | auto p_meta = to_meta(p); |
2329 | auto out_meta = to_meta(out); |
2330 | at::AutoDispatchSkipFunctionalize func_guard; |
2331 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2332 | at::_ops::bernoulli_Tensor_out::call(self_meta, p_meta, generator, out_meta); |
2333 | } |
2334 | |
2335 | at::Tensor self_; |
2336 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2337 | at::functionalization::impl::sync(self); |
2338 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2339 | } else { |
2340 | self_ = self; |
2341 | } |
2342 | |
2343 | at::Tensor p_; |
2344 | if (at::functionalization::impl::isFunctionalTensor(p)) { |
2345 | at::functionalization::impl::sync(p); |
2346 | p_ = at::functionalization::impl::from_functional_tensor(p); |
2347 | } else { |
2348 | p_ = p; |
2349 | } |
2350 | |
2351 | at::Tensor out_; |
2352 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2353 | at::functionalization::impl::sync(out); |
2354 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2355 | } else { |
2356 | out_ = out; |
2357 | } |
2358 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2359 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(p))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2364 | } else { |
2365 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2366 | at::AutoDispatchSkipFunctionalize guard; |
2367 | at::Tensor tmp_output = at::_ops::bernoulli_Tensor_out::call(self_, p_, generator, out_); |
      return out;
2369 | } |
2370 | } else { |
2371 | at::Tensor tmp_output; |
2372 | { |
2373 | at::AutoDispatchSkipFunctionalize guard; |
2374 | tmp_output = at::_ops::bernoulli_Tensor::call(self_, p_, generator); |
2375 | } |
2376 | at::functionalization::impl::replace_(out, tmp_output); |
2377 | at::functionalization::impl::commit_update(out); |
2378 | at::functionalization::impl::sync(out); |
2379 | return out; |
2380 | } |
2381 | } |
2382 | |
2383 | at::Tensor & bernoulli__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & p, c10::optional<at::Generator> generator) { |
2384 | if (true) { |
2385 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2386 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2388 | auto self_meta = to_meta(self); |
2389 | auto p_meta = to_meta(p); |
2390 | at::AutoDispatchSkipFunctionalize func_guard; |
2391 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2392 | at::_ops::bernoulli__Tensor::call(self_meta, p_meta, generator); |
2393 | } |
2394 | |
2395 | at::Tensor self_; |
2396 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2397 | at::functionalization::impl::sync(self); |
2398 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2399 | } else { |
2400 | self_ = self; |
2401 | } |
2402 | |
2403 | at::Tensor p_; |
2404 | if (at::functionalization::impl::isFunctionalTensor(p)) { |
2405 | at::functionalization::impl::sync(p); |
2406 | p_ = at::functionalization::impl::from_functional_tensor(p); |
2407 | } else { |
2408 | p_ = p; |
2409 | } |
2410 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
2411 | if ((false || at::functionalization::impl::isFunctionalTensor(p))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2416 | } else { |
2417 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2418 | at::AutoDispatchSkipFunctionalize guard; |
2419 | at::Tensor tmp_output = at::_ops::bernoulli__Tensor::call(self_, p_, generator); |
      return self;
2421 | } |
2422 | } else { |
2423 | at::Tensor tmp_output; |
2424 | { |
2425 | at::AutoDispatchSkipFunctionalize guard; |
2426 | tmp_output = at::_ops::bernoulli_Tensor::call(self_, p_, generator); |
2427 | } |
2428 | at::functionalization::impl::replace_(self, tmp_output); |
2429 | at::functionalization::impl::commit_update(self); |
2430 | at::functionalization::impl::sync(self); |
2431 | return self; |
2432 | } |
2433 | } |
2434 | |
2435 | at::Tensor & bernoulli_out_float_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, c10::optional<at::Generator> generator, at::Tensor & out) { |
2436 | if (false) { |
2437 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2438 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2440 | auto self_meta = to_meta(self); |
2441 | auto out_meta = to_meta(out); |
2442 | at::AutoDispatchSkipFunctionalize func_guard; |
2443 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2444 | at::_ops::bernoulli_float_out::call(self_meta, p, generator, out_meta); |
2445 | } |
2446 | |
2447 | at::Tensor self_; |
2448 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2449 | at::functionalization::impl::sync(self); |
2450 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2451 | } else { |
2452 | self_ = self; |
2453 | } |
2454 | |
2455 | at::Tensor out_; |
2456 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2457 | at::functionalization::impl::sync(out); |
2458 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2459 | } else { |
2460 | out_ = out; |
2461 | } |
2462 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2463 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2468 | } else { |
2469 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2470 | at::AutoDispatchSkipFunctionalize guard; |
2471 | at::Tensor tmp_output = at::_ops::bernoulli_float_out::call(self_, p, generator, out_); |
      return out;
2473 | } |
2474 | } else { |
2475 | at::Tensor tmp_output; |
2476 | { |
2477 | at::AutoDispatchSkipFunctionalize guard; |
2478 | tmp_output = at::_ops::bernoulli_p::call(self_, p, generator); |
2479 | } |
2480 | at::functionalization::impl::replace_(out, tmp_output); |
2481 | at::functionalization::impl::commit_update(out); |
2482 | at::functionalization::impl::sync(out); |
2483 | return out; |
2484 | } |
2485 | } |
2486 | |
2487 | at::Tensor & bernoulli__float(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p, c10::optional<at::Generator> generator) { |
2488 | if (true) { |
2489 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2490 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2492 | auto self_meta = to_meta(self); |
2493 | at::AutoDispatchSkipFunctionalize func_guard; |
2494 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2495 | at::_ops::bernoulli__float::call(self_meta, p, generator); |
2496 | } |
2497 | |
2498 | at::Tensor self_; |
2499 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2500 | at::functionalization::impl::sync(self); |
2501 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2502 | } else { |
2503 | self_ = self; |
2504 | } |
2505 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
2506 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2511 | } else { |
2512 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2513 | at::AutoDispatchSkipFunctionalize guard; |
2514 | at::Tensor tmp_output = at::_ops::bernoulli__float::call(self_, p, generator); |
      return self;
2516 | } |
2517 | } else { |
2518 | at::Tensor tmp_output; |
2519 | { |
2520 | at::AutoDispatchSkipFunctionalize guard; |
2521 | tmp_output = at::_ops::bernoulli_p::call(self_, p, generator); |
2522 | } |
2523 | at::functionalization::impl::replace_(self, tmp_output); |
2524 | at::functionalization::impl::commit_update(self); |
2525 | at::functionalization::impl::sync(self); |
2526 | return self; |
2527 | } |
2528 | } |
2529 | |
2530 | at::Tensor & bmm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) { |
2531 | if (false) { |
2532 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2533 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2535 | auto self_meta = to_meta(self); |
2536 | auto mat2_meta = to_meta(mat2); |
2537 | auto out_meta = to_meta(out); |
2538 | at::AutoDispatchSkipFunctionalize func_guard; |
2539 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2540 | at::_ops::bmm_out::call(self_meta, mat2_meta, out_meta); |
2541 | } |
2542 | |
2543 | at::Tensor self_; |
2544 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2545 | at::functionalization::impl::sync(self); |
2546 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2547 | } else { |
2548 | self_ = self; |
2549 | } |
2550 | |
2551 | at::Tensor mat2_; |
2552 | if (at::functionalization::impl::isFunctionalTensor(mat2)) { |
2553 | at::functionalization::impl::sync(mat2); |
2554 | mat2_ = at::functionalization::impl::from_functional_tensor(mat2); |
2555 | } else { |
2556 | mat2_ = mat2; |
2557 | } |
2558 | |
2559 | at::Tensor out_; |
2560 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2561 | at::functionalization::impl::sync(out); |
2562 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2563 | } else { |
2564 | out_ = out; |
2565 | } |
2566 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2567 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mat2))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2572 | } else { |
2573 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2574 | at::AutoDispatchSkipFunctionalize guard; |
2575 | at::Tensor tmp_output = at::_ops::bmm_out::call(self_, mat2_, out_); |
      return out;
2577 | } |
2578 | } else { |
2579 | at::Tensor tmp_output; |
2580 | { |
2581 | at::AutoDispatchSkipFunctionalize guard; |
2582 | tmp_output = at::_ops::bmm::call(self_, mat2_); |
2583 | } |
2584 | at::functionalization::impl::replace_(out, tmp_output); |
2585 | at::functionalization::impl::commit_update(out); |
2586 | at::functionalization::impl::sync(out); |
2587 | return out; |
2588 | } |
2589 | } |
2590 | |
2591 | at::Tensor & clamp_max_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & max, at::Tensor & out) { |
2592 | if (false) { |
2593 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2594 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2596 | auto self_meta = to_meta(self); |
2597 | auto out_meta = to_meta(out); |
2598 | at::AutoDispatchSkipFunctionalize func_guard; |
2599 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2600 | at::_ops::clamp_max_out::call(self_meta, max, out_meta); |
2601 | } |
2602 | |
2603 | at::Tensor self_; |
2604 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2605 | at::functionalization::impl::sync(self); |
2606 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2607 | } else { |
2608 | self_ = self; |
2609 | } |
2610 | |
2611 | at::Tensor out_; |
2612 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2613 | at::functionalization::impl::sync(out); |
2614 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2615 | } else { |
2616 | out_ = out; |
2617 | } |
2618 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2619 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2624 | } else { |
2625 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2626 | at::AutoDispatchSkipFunctionalize guard; |
2627 | at::Tensor tmp_output = at::_ops::clamp_max_out::call(self_, max, out_); |
      return out;
2629 | } |
2630 | } else { |
2631 | at::Tensor tmp_output; |
2632 | { |
2633 | at::AutoDispatchSkipFunctionalize guard; |
2634 | tmp_output = at::_ops::clamp_max::call(self_, max); |
2635 | } |
2636 | at::functionalization::impl::replace_(out, tmp_output); |
2637 | at::functionalization::impl::commit_update(out); |
2638 | at::functionalization::impl::sync(out); |
2639 | return out; |
2640 | } |
2641 | } |
2642 | |
2643 | at::Tensor & clamp_max_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & max) { |
2644 | if (true) { |
2645 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2646 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2648 | auto self_meta = to_meta(self); |
2649 | at::AutoDispatchSkipFunctionalize func_guard; |
2650 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2651 | at::_ops::clamp_max_::call(self_meta, max); |
2652 | } |
2653 | |
2654 | at::Tensor self_; |
2655 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2656 | at::functionalization::impl::sync(self); |
2657 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2658 | } else { |
2659 | self_ = self; |
2660 | } |
2661 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
2662 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2667 | } else { |
2668 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2669 | at::AutoDispatchSkipFunctionalize guard; |
2670 | at::Tensor tmp_output = at::_ops::clamp_max_::call(self_, max); |
      return self;
2672 | } |
2673 | } else { |
2674 | at::Tensor tmp_output; |
2675 | { |
2676 | at::AutoDispatchSkipFunctionalize guard; |
2677 | tmp_output = at::_ops::clamp_max::call(self_, max); |
2678 | } |
2679 | at::functionalization::impl::replace_(self, tmp_output); |
2680 | at::functionalization::impl::commit_update(self); |
2681 | at::functionalization::impl::sync(self); |
2682 | return self; |
2683 | } |
2684 | } |
2685 | |
2686 | at::Tensor & clamp_max_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & max, at::Tensor & out) { |
2687 | if (false) { |
2688 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2689 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2691 | auto self_meta = to_meta(self); |
2692 | auto max_meta = to_meta(max); |
2693 | auto out_meta = to_meta(out); |
2694 | at::AutoDispatchSkipFunctionalize func_guard; |
2695 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2696 | at::_ops::clamp_max_Tensor_out::call(self_meta, max_meta, out_meta); |
2697 | } |
2698 | |
2699 | at::Tensor self_; |
2700 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2701 | at::functionalization::impl::sync(self); |
2702 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2703 | } else { |
2704 | self_ = self; |
2705 | } |
2706 | |
2707 | at::Tensor max_; |
2708 | if (at::functionalization::impl::isFunctionalTensor(max)) { |
2709 | at::functionalization::impl::sync(max); |
2710 | max_ = at::functionalization::impl::from_functional_tensor(max); |
2711 | } else { |
2712 | max_ = max; |
2713 | } |
2714 | |
2715 | at::Tensor out_; |
2716 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2717 | at::functionalization::impl::sync(out); |
2718 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2719 | } else { |
2720 | out_ = out; |
2721 | } |
2722 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2723 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(max))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2728 | } else { |
2729 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2730 | at::AutoDispatchSkipFunctionalize guard; |
2731 | at::Tensor tmp_output = at::_ops::clamp_max_Tensor_out::call(self_, max_, out_); |
      return out;
2733 | } |
2734 | } else { |
2735 | at::Tensor tmp_output; |
2736 | { |
2737 | at::AutoDispatchSkipFunctionalize guard; |
2738 | tmp_output = at::_ops::clamp_max_Tensor::call(self_, max_); |
2739 | } |
2740 | at::functionalization::impl::replace_(out, tmp_output); |
2741 | at::functionalization::impl::commit_update(out); |
2742 | at::functionalization::impl::sync(out); |
2743 | return out; |
2744 | } |
2745 | } |
2746 | |
2747 | at::Tensor & clamp_max__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & max) { |
2748 | if (true) { |
2749 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2750 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2752 | auto self_meta = to_meta(self); |
2753 | auto max_meta = to_meta(max); |
2754 | at::AutoDispatchSkipFunctionalize func_guard; |
2755 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2756 | at::_ops::clamp_max__Tensor::call(self_meta, max_meta); |
2757 | } |
2758 | |
2759 | at::Tensor self_; |
2760 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2761 | at::functionalization::impl::sync(self); |
2762 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2763 | } else { |
2764 | self_ = self; |
2765 | } |
2766 | |
2767 | at::Tensor max_; |
2768 | if (at::functionalization::impl::isFunctionalTensor(max)) { |
2769 | at::functionalization::impl::sync(max); |
2770 | max_ = at::functionalization::impl::from_functional_tensor(max); |
2771 | } else { |
2772 | max_ = max; |
2773 | } |
2774 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
2775 | if ((false || at::functionalization::impl::isFunctionalTensor(max))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2780 | } else { |
2781 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2782 | at::AutoDispatchSkipFunctionalize guard; |
2783 | at::Tensor tmp_output = at::_ops::clamp_max__Tensor::call(self_, max_); |
      return self;
2785 | } |
2786 | } else { |
2787 | at::Tensor tmp_output; |
2788 | { |
2789 | at::AutoDispatchSkipFunctionalize guard; |
2790 | tmp_output = at::_ops::clamp_max_Tensor::call(self_, max_); |
2791 | } |
2792 | at::functionalization::impl::replace_(self, tmp_output); |
2793 | at::functionalization::impl::commit_update(self); |
2794 | at::functionalization::impl::sync(self); |
2795 | return self; |
2796 | } |
2797 | } |
2798 | |
2799 | at::Tensor & clamp_min_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & min, at::Tensor & out) { |
2800 | if (false) { |
2801 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2802 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2804 | auto self_meta = to_meta(self); |
2805 | auto out_meta = to_meta(out); |
2806 | at::AutoDispatchSkipFunctionalize func_guard; |
2807 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2808 | at::_ops::clamp_min_out::call(self_meta, min, out_meta); |
2809 | } |
2810 | |
2811 | at::Tensor self_; |
2812 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2813 | at::functionalization::impl::sync(self); |
2814 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2815 | } else { |
2816 | self_ = self; |
2817 | } |
2818 | |
2819 | at::Tensor out_; |
2820 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2821 | at::functionalization::impl::sync(out); |
2822 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2823 | } else { |
2824 | out_ = out; |
2825 | } |
2826 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2827 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2832 | } else { |
2833 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2834 | at::AutoDispatchSkipFunctionalize guard; |
2835 | at::Tensor tmp_output = at::_ops::clamp_min_out::call(self_, min, out_); |
      return out;
2837 | } |
2838 | } else { |
2839 | at::Tensor tmp_output; |
2840 | { |
2841 | at::AutoDispatchSkipFunctionalize guard; |
2842 | tmp_output = at::_ops::clamp_min::call(self_, min); |
2843 | } |
2844 | at::functionalization::impl::replace_(out, tmp_output); |
2845 | at::functionalization::impl::commit_update(out); |
2846 | at::functionalization::impl::sync(out); |
2847 | return out; |
2848 | } |
2849 | } |
2850 | |
2851 | at::Tensor & clamp_min_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & min) { |
2852 | if (true) { |
2853 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2854 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2856 | auto self_meta = to_meta(self); |
2857 | at::AutoDispatchSkipFunctionalize func_guard; |
2858 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2859 | at::_ops::clamp_min_::call(self_meta, min); |
2860 | } |
2861 | |
2862 | at::Tensor self_; |
2863 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2864 | at::functionalization::impl::sync(self); |
2865 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2866 | } else { |
2867 | self_ = self; |
2868 | } |
2869 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
2870 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2875 | } else { |
2876 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2877 | at::AutoDispatchSkipFunctionalize guard; |
2878 | at::Tensor tmp_output = at::_ops::clamp_min_::call(self_, min); |
      return self;
2880 | } |
2881 | } else { |
2882 | at::Tensor tmp_output; |
2883 | { |
2884 | at::AutoDispatchSkipFunctionalize guard; |
2885 | tmp_output = at::_ops::clamp_min::call(self_, min); |
2886 | } |
2887 | at::functionalization::impl::replace_(self, tmp_output); |
2888 | at::functionalization::impl::commit_update(self); |
2889 | at::functionalization::impl::sync(self); |
2890 | return self; |
2891 | } |
2892 | } |
2893 | |
2894 | at::Tensor & clamp_min_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & min, at::Tensor & out) { |
2895 | if (false) { |
2896 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2897 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2899 | auto self_meta = to_meta(self); |
2900 | auto min_meta = to_meta(min); |
2901 | auto out_meta = to_meta(out); |
2902 | at::AutoDispatchSkipFunctionalize func_guard; |
2903 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2904 | at::_ops::clamp_min_Tensor_out::call(self_meta, min_meta, out_meta); |
2905 | } |
2906 | |
2907 | at::Tensor self_; |
2908 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2909 | at::functionalization::impl::sync(self); |
2910 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2911 | } else { |
2912 | self_ = self; |
2913 | } |
2914 | |
2915 | at::Tensor min_; |
2916 | if (at::functionalization::impl::isFunctionalTensor(min)) { |
2917 | at::functionalization::impl::sync(min); |
2918 | min_ = at::functionalization::impl::from_functional_tensor(min); |
2919 | } else { |
2920 | min_ = min; |
2921 | } |
2922 | |
2923 | at::Tensor out_; |
2924 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2925 | at::functionalization::impl::sync(out); |
2926 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2927 | } else { |
2928 | out_ = out; |
2929 | } |
2930 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2931 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(min))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2936 | } else { |
2937 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2938 | at::AutoDispatchSkipFunctionalize guard; |
2939 | at::Tensor tmp_output = at::_ops::clamp_min_Tensor_out::call(self_, min_, out_); |
      return out;
2941 | } |
2942 | } else { |
2943 | at::Tensor tmp_output; |
2944 | { |
2945 | at::AutoDispatchSkipFunctionalize guard; |
2946 | tmp_output = at::_ops::clamp_min_Tensor::call(self_, min_); |
2947 | } |
2948 | at::functionalization::impl::replace_(out, tmp_output); |
2949 | at::functionalization::impl::commit_update(out); |
2950 | at::functionalization::impl::sync(out); |
2951 | return out; |
2952 | } |
2953 | } |
2954 | |
2955 | at::Tensor & clamp_min__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & min) { |
2956 | if (true) { |
2957 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2958 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2960 | auto self_meta = to_meta(self); |
2961 | auto min_meta = to_meta(min); |
2962 | at::AutoDispatchSkipFunctionalize func_guard; |
2963 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2964 | at::_ops::clamp_min__Tensor::call(self_meta, min_meta); |
2965 | } |
2966 | |
2967 | at::Tensor self_; |
2968 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2969 | at::functionalization::impl::sync(self); |
2970 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2971 | } else { |
2972 | self_ = self; |
2973 | } |
2974 | |
2975 | at::Tensor min_; |
2976 | if (at::functionalization::impl::isFunctionalTensor(min)) { |
2977 | at::functionalization::impl::sync(min); |
2978 | min_ = at::functionalization::impl::from_functional_tensor(min); |
2979 | } else { |
2980 | min_ = min; |
2981 | } |
2982 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
2983 | if ((false || at::functionalization::impl::isFunctionalTensor(min))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2988 | } else { |
2989 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2990 | at::AutoDispatchSkipFunctionalize guard; |
2991 | at::Tensor tmp_output = at::_ops::clamp_min__Tensor::call(self_, min_); |
      return self;
2993 | } |
2994 | } else { |
2995 | at::Tensor tmp_output; |
2996 | { |
2997 | at::AutoDispatchSkipFunctionalize guard; |
2998 | tmp_output = at::_ops::clamp_min_Tensor::call(self_, min_); |
2999 | } |
3000 | at::functionalization::impl::replace_(self, tmp_output); |
3001 | at::functionalization::impl::commit_update(self); |
3002 | at::functionalization::impl::sync(self); |
3003 | return self; |
3004 | } |
3005 | } |
3006 | |
3007 | at::Tensor & clip_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max, at::Tensor & out) { |
3008 | if (false) { |
3009 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3010 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
3012 | auto self_meta = to_meta(self); |
3013 | auto out_meta = to_meta(out); |
3014 | at::AutoDispatchSkipFunctionalize func_guard; |
3015 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3016 | at::_ops::clip_out::call(self_meta, min, max, out_meta); |
3017 | } |
3018 | |
3019 | at::Tensor self_; |
3020 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3021 | at::functionalization::impl::sync(self); |
3022 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3023 | } else { |
3024 | self_ = self; |
3025 | } |
3026 | |
3027 | at::Tensor out_; |
3028 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
3029 | at::functionalization::impl::sync(out); |
3030 | out_ = at::functionalization::impl::from_functional_tensor(out); |
3031 | } else { |
3032 | out_ = out; |
3033 | } |
3034 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
3035 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3040 | } else { |
3041 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3042 | at::AutoDispatchSkipFunctionalize guard; |
3043 | at::Tensor tmp_output = at::_ops::clip_out::call(self_, min, max, out_); |
      return out;
3045 | } |
3046 | } else { |
3047 | at::Tensor tmp_output; |
3048 | { |
3049 | at::AutoDispatchSkipFunctionalize guard; |
3050 | tmp_output = at::_ops::clip::call(self_, min, max); |
3051 | } |
3052 | at::functionalization::impl::replace_(out, tmp_output); |
3053 | at::functionalization::impl::commit_update(out); |
3054 | at::functionalization::impl::sync(out); |
3055 | return out; |
3056 | } |
3057 | } |
3058 | |
3059 | at::Tensor & clip_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) { |
3060 | if (true) { |
3061 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3062 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
3064 | auto self_meta = to_meta(self); |
3065 | at::AutoDispatchSkipFunctionalize func_guard; |
3066 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3067 | at::_ops::clip_::call(self_meta, min, max); |
3068 | } |
3069 | |
3070 | at::Tensor self_; |
3071 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3072 | at::functionalization::impl::sync(self); |
3073 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3074 | } else { |
3075 | self_ = self; |
3076 | } |
3077 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
3078 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3083 | } else { |
3084 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3085 | at::AutoDispatchSkipFunctionalize guard; |
3086 | at::Tensor tmp_output = at::_ops::clip_::call(self_, min, max); |
      return self;
3088 | } |
3089 | } else { |
3090 | at::Tensor tmp_output; |
3091 | { |
3092 | at::AutoDispatchSkipFunctionalize guard; |
3093 | tmp_output = at::_ops::clip::call(self_, min, max); |
3094 | } |
3095 | at::functionalization::impl::replace_(self, tmp_output); |
3096 | at::functionalization::impl::commit_update(self); |
3097 | at::functionalization::impl::sync(self); |
3098 | return self; |
3099 | } |
3100 | } |
3101 | |
3102 | at::Tensor & clip_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max, at::Tensor & out) { |
3103 | if (false) { |
3104 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3105 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
3107 | auto self_meta = to_meta(self); |
3108 | auto min_meta = to_meta(min); |
3109 | auto max_meta = to_meta(max); |
3110 | auto out_meta = to_meta(out); |
3111 | at::AutoDispatchSkipFunctionalize func_guard; |
3112 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3113 | at::_ops::clip_Tensor_out::call(self_meta, min_meta, max_meta, out_meta); |
3114 | } |
3115 | |
3116 | at::Tensor self_; |
3117 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3118 | at::functionalization::impl::sync(self); |
3119 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3120 | } else { |
3121 | self_ = self; |
3122 | } |
3123 | |
3124 | c10::optional<at::Tensor> min_; |
3125 | if (at::functionalization::impl::isFunctionalTensor(min)) { |
3126 | at::functionalization::impl::sync(min); |
3127 | min_ = at::functionalization::impl::from_functional_tensor(min); |
3128 | } else { |
3129 | min_ = min; |
3130 | } |
3131 | |
3132 | c10::optional<at::Tensor> max_; |
3133 | if (at::functionalization::impl::isFunctionalTensor(max)) { |
3134 | at::functionalization::impl::sync(max); |
3135 | max_ = at::functionalization::impl::from_functional_tensor(max); |
3136 | } else { |
3137 | max_ = max; |
3138 | } |
3139 | |
3140 | at::Tensor out_; |
3141 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
3142 | at::functionalization::impl::sync(out); |
3143 | out_ = at::functionalization::impl::from_functional_tensor(out); |
3144 | } else { |
3145 | out_ = out; |
3146 | } |
3147 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
3148 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(min) || at::functionalization::impl::isFunctionalTensor(max))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3153 | } else { |
3154 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3155 | at::AutoDispatchSkipFunctionalize guard; |
3156 | at::Tensor tmp_output = at::_ops::clip_Tensor_out::call(self_, min_, max_, out_); |
      return out;
3158 | } |
3159 | } else { |
3160 | at::Tensor tmp_output; |
3161 | { |
3162 | at::AutoDispatchSkipFunctionalize guard; |
3163 | tmp_output = at::_ops::clip_Tensor::call(self_, min_, max_); |
3164 | } |
3165 | at::functionalization::impl::replace_(out, tmp_output); |
3166 | at::functionalization::impl::commit_update(out); |
3167 | at::functionalization::impl::sync(out); |
3168 | return out; |
3169 | } |
3170 | } |
3171 | |
3172 | at::Tensor & clip__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) { |
3173 | if (true) { |
3174 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3175 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
3177 | auto self_meta = to_meta(self); |
3178 | auto min_meta = to_meta(min); |
3179 | auto max_meta = to_meta(max); |
3180 | at::AutoDispatchSkipFunctionalize func_guard; |
3181 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3182 | at::_ops::clip__Tensor::call(self_meta, min_meta, max_meta); |
3183 | } |
3184 | |
3185 | at::Tensor self_; |
3186 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3187 | at::functionalization::impl::sync(self); |
3188 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3189 | } else { |
3190 | self_ = self; |
3191 | } |
3192 | |
3193 | c10::optional<at::Tensor> min_; |
3194 | if (at::functionalization::impl::isFunctionalTensor(min)) { |
3195 | at::functionalization::impl::sync(min); |
3196 | min_ = at::functionalization::impl::from_functional_tensor(min); |
3197 | } else { |
3198 | min_ = min; |
3199 | } |
3200 | |
3201 | c10::optional<at::Tensor> max_; |
3202 | if (at::functionalization::impl::isFunctionalTensor(max)) { |
3203 | at::functionalization::impl::sync(max); |
3204 | max_ = at::functionalization::impl::from_functional_tensor(max); |
3205 | } else { |
3206 | max_ = max; |
3207 | } |
3208 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
3209 | if ((false || at::functionalization::impl::isFunctionalTensor(min) || at::functionalization::impl::isFunctionalTensor(max))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3214 | } else { |
3215 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3216 | at::AutoDispatchSkipFunctionalize guard; |
3217 | at::Tensor tmp_output = at::_ops::clip__Tensor::call(self_, min_, max_); |
      return self;
3219 | } |
3220 | } else { |
3221 | at::Tensor tmp_output; |
3222 | { |
3223 | at::AutoDispatchSkipFunctionalize guard; |
3224 | tmp_output = at::_ops::clip_Tensor::call(self_, min_, max_); |
3225 | } |
3226 | at::functionalization::impl::replace_(self, tmp_output); |
3227 | at::functionalization::impl::commit_update(self); |
3228 | at::functionalization::impl::sync(self); |
3229 | return self; |
3230 | } |
3231 | } |
3232 | |
3233 | at::Tensor & complex_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & real, const at::Tensor & imag, at::Tensor & out) { |
3234 | if (false) { |
3235 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3236 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
3238 | auto real_meta = to_meta(real); |
3239 | auto imag_meta = to_meta(imag); |
3240 | auto out_meta = to_meta(out); |
3241 | at::AutoDispatchSkipFunctionalize func_guard; |
3242 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3243 | at::_ops::complex_out::call(real_meta, imag_meta, out_meta); |
3244 | } |
3245 | |
3246 | at::Tensor real_; |
3247 | if (at::functionalization::impl::isFunctionalTensor(real)) { |
3248 | at::functionalization::impl::sync(real); |
3249 | real_ = at::functionalization::impl::from_functional_tensor(real); |
3250 | } else { |
3251 | real_ = real; |
3252 | } |
3253 | |
3254 | at::Tensor imag_; |
3255 | if (at::functionalization::impl::isFunctionalTensor(imag)) { |
3256 | at::functionalization::impl::sync(imag); |
3257 | imag_ = at::functionalization::impl::from_functional_tensor(imag); |
3258 | } else { |
3259 | imag_ = imag; |
3260 | } |
3261 | |
3262 | at::Tensor out_; |
3263 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
3264 | at::functionalization::impl::sync(out); |
3265 | out_ = at::functionalization::impl::from_functional_tensor(out); |
3266 | } else { |
3267 | out_ = out; |
3268 | } |
3269 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
3270 | if ((false || at::functionalization::impl::isFunctionalTensor(real) || at::functionalization::impl::isFunctionalTensor(imag))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3275 | } else { |
3276 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3277 | at::AutoDispatchSkipFunctionalize guard; |
3278 | at::Tensor tmp_output = at::_ops::complex_out::call(real_, imag_, out_); |
      return out;
3280 | } |
3281 | } else { |
3282 | at::Tensor tmp_output; |
3283 | { |
3284 | at::AutoDispatchSkipFunctionalize guard; |
3285 | tmp_output = at::_ops::complex::call(real_, imag_); |
3286 | } |
3287 | at::functionalization::impl::replace_(out, tmp_output); |
3288 | at::functionalization::impl::commit_update(out); |
3289 | at::functionalization::impl::sync(out); |
3290 | return out; |
3291 | } |
3292 | } |
3293 | |
3294 | at::Tensor & constant_pad_nd_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value, at::Tensor & out) { |
3295 | if (false) { |
3296 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3297 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
3299 | auto self_meta = to_meta(self); |
3300 | auto out_meta = to_meta(out); |
3301 | at::AutoDispatchSkipFunctionalize func_guard; |
3302 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3303 | at::_ops::constant_pad_nd_out::call(self_meta, pad, value, out_meta); |
3304 | } |
3305 | |
3306 | at::Tensor self_; |
3307 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3308 | at::functionalization::impl::sync(self); |
3309 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3310 | } else { |
3311 | self_ = self; |
3312 | } |
3313 | |
3314 | at::Tensor out_; |
3315 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
3316 | at::functionalization::impl::sync(out); |
3317 | out_ = at::functionalization::impl::from_functional_tensor(out); |
3318 | } else { |
3319 | out_ = out; |
3320 | } |
3321 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
3322 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3327 | } else { |
3328 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3329 | at::AutoDispatchSkipFunctionalize guard; |
3330 | at::Tensor tmp_output = at::_ops::constant_pad_nd_out::call(self_, pad, value, out_); |
      return out;
3332 | } |
3333 | } else { |
3334 | at::Tensor tmp_output; |
3335 | { |
3336 | at::AutoDispatchSkipFunctionalize guard; |
3337 | tmp_output = at::_ops::constant_pad_nd::call(self_, pad, value); |
3338 | } |
3339 | at::functionalization::impl::replace_(out, tmp_output); |
3340 | at::functionalization::impl::commit_update(out); |
3341 | at::functionalization::impl::sync(out); |
3342 | return out; |
3343 | } |
3344 | } |
3345 | |
3346 | at::Tensor & conv_tbc_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad, at::Tensor & out) { |
3347 | if (false) { |
3348 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3349 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
3351 | auto self_meta = to_meta(self); |
3352 | auto weight_meta = to_meta(weight); |
3353 | auto bias_meta = to_meta(bias); |
3354 | auto out_meta = to_meta(out); |
3355 | at::AutoDispatchSkipFunctionalize func_guard; |
3356 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3357 | at::_ops::conv_tbc_out::call(self_meta, weight_meta, bias_meta, pad, out_meta); |
3358 | } |
3359 | |
3360 | at::Tensor self_; |
3361 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3362 | at::functionalization::impl::sync(self); |
3363 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3364 | } else { |
3365 | self_ = self; |
3366 | } |
3367 | |
3368 | at::Tensor weight_; |
3369 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
3370 | at::functionalization::impl::sync(weight); |
3371 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
3372 | } else { |
3373 | weight_ = weight; |
3374 | } |
3375 | |
3376 | at::Tensor bias_; |
3377 | if (at::functionalization::impl::isFunctionalTensor(bias)) { |
3378 | at::functionalization::impl::sync(bias); |
3379 | bias_ = at::functionalization::impl::from_functional_tensor(bias); |
3380 | } else { |
3381 | bias_ = bias; |
3382 | } |
3383 | |
3384 | at::Tensor out_; |
3385 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
3386 | at::functionalization::impl::sync(out); |
3387 | out_ = at::functionalization::impl::from_functional_tensor(out); |
3388 | } else { |
3389 | out_ = out; |
3390 | } |
3391 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
3392 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3397 | } else { |
3398 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3399 | at::AutoDispatchSkipFunctionalize guard; |
3400 | at::Tensor tmp_output = at::_ops::conv_tbc_out::call(self_, weight_, bias_, pad, out_); |
      return out;
3402 | } |
3403 | } else { |
3404 | at::Tensor tmp_output; |
3405 | { |
3406 | at::AutoDispatchSkipFunctionalize guard; |
3407 | tmp_output = at::_ops::conv_tbc::call(self_, weight_, bias_, pad); |
3408 | } |
3409 | at::functionalization::impl::replace_(out, tmp_output); |
3410 | at::functionalization::impl::commit_update(out); |
3411 | at::functionalization::impl::sync(out); |
3412 | return out; |
3413 | } |
3414 | } |
3415 | |
3416 | at::Tensor & _copy_from_and_resize_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & dst, at::Tensor & out) { |
3417 | if (false) { |
3418 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3419 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
3421 | auto self_meta = to_meta(self); |
3422 | auto dst_meta = to_meta(dst); |
3423 | auto out_meta = to_meta(out); |
3424 | at::AutoDispatchSkipFunctionalize func_guard; |
3425 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3426 | at::_ops::_copy_from_and_resize_out::call(self_meta, dst_meta, out_meta); |
3427 | } |
3428 | |
3429 | at::Tensor self_; |
3430 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3431 | at::functionalization::impl::sync(self); |
3432 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3433 | } else { |
3434 | self_ = self; |
3435 | } |
3436 | |
3437 | at::Tensor dst_; |
3438 | if (at::functionalization::impl::isFunctionalTensor(dst)) { |
3439 | at::functionalization::impl::sync(dst); |
3440 | dst_ = at::functionalization::impl::from_functional_tensor(dst); |
3441 | } else { |
3442 | dst_ = dst; |
3443 | } |
3444 | |
3445 | at::Tensor out_; |
3446 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
3447 | at::functionalization::impl::sync(out); |
3448 | out_ = at::functionalization::impl::from_functional_tensor(out); |
3449 | } else { |
3450 | out_ = out; |
3451 | } |
3452 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
3453 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(dst))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3458 | } else { |
3459 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3460 | at::AutoDispatchSkipFunctionalize guard; |
3461 | at::Tensor tmp_output = at::_ops::_copy_from_and_resize_out::call(self_, dst_, out_); |
      return out;
3463 | } |
3464 | } else { |
3465 | at::Tensor tmp_output; |
3466 | { |
3467 | at::AutoDispatchSkipFunctionalize guard; |
3468 | tmp_output = at::_ops::_copy_from_and_resize::call(self_, dst_); |
3469 | } |
3470 | at::functionalization::impl::replace_(out, tmp_output); |
3471 | at::functionalization::impl::commit_update(out); |
3472 | at::functionalization::impl::sync(out); |
3473 | return out; |
3474 | } |
3475 | } |
3476 | |
3477 | at::Tensor & cos_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
3478 | if (false) { |
3479 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3480 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
3482 | auto self_meta = to_meta(self); |
3483 | auto out_meta = to_meta(out); |
3484 | at::AutoDispatchSkipFunctionalize func_guard; |
3485 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3486 | at::_ops::cos_out::call(self_meta, out_meta); |
3487 | } |
3488 | |
3489 | at::Tensor self_; |
3490 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3491 | at::functionalization::impl::sync(self); |
3492 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3493 | } else { |
3494 | self_ = self; |
3495 | } |
3496 | |
3497 | at::Tensor out_; |
3498 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
3499 | at::functionalization::impl::sync(out); |
3500 | out_ = at::functionalization::impl::from_functional_tensor(out); |
3501 | } else { |
3502 | out_ = out; |
3503 | } |
3504 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
3505 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3510 | } else { |
3511 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3512 | at::AutoDispatchSkipFunctionalize guard; |
3513 | at::Tensor tmp_output = at::_ops::cos_out::call(self_, out_); |
      return out;
3515 | } |
3516 | } else { |
3517 | at::Tensor tmp_output; |
3518 | { |
3519 | at::AutoDispatchSkipFunctionalize guard; |
3520 | tmp_output = at::_ops::cos::call(self_); |
3521 | } |
3522 | at::functionalization::impl::replace_(out, tmp_output); |
3523 | at::functionalization::impl::commit_update(out); |
3524 | at::functionalization::impl::sync(out); |
3525 | return out; |
3526 | } |
3527 | } |
3528 | |
3529 | at::Tensor & cos_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
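  // Inplace variant: same template as the out= kernels above, except the
  // meta-tensor shape check is enabled (`if (true)`), and the result of the
  // functional op (at::cos) is committed back into `self` itself.
  //
  // Illustrative sketch of how this kernel is typically reached (uses
  // at::functionalization::impl::to_functional_tensor from
  // FunctionalTensorWrapper.h; the example is an assumption for illustration,
  // not part of this generated file):
  //   at::Tensor base = at::randn({4});
  //   at::Tensor f = at::functionalization::impl::to_functional_tensor(base);
  //   at::cos_(f);  // dispatches here; runs at::cos functionally and
  //                 // replaces the wrapper's value with the result.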
3530 | if (true) { |
3531 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3532 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
3534 | auto self_meta = to_meta(self); |
3535 | at::AutoDispatchSkipFunctionalize func_guard; |
3536 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3537 | at::_ops::cos_::call(self_meta); |
3538 | } |
3539 | |
3540 | at::Tensor self_; |
3541 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3542 | at::functionalization::impl::sync(self); |
3543 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3544 | } else { |
3545 | self_ = self; |
3546 | } |
3547 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
3548 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3553 | } else { |
3554 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3555 | at::AutoDispatchSkipFunctionalize guard; |
3556 | at::Tensor tmp_output = at::_ops::cos_::call(self_); |
      return self;
3558 | } |
3559 | } else { |
3560 | at::Tensor tmp_output; |
3561 | { |
3562 | at::AutoDispatchSkipFunctionalize guard; |
3563 | tmp_output = at::_ops::cos::call(self_); |
3564 | } |
3565 | at::functionalization::impl::replace_(self, tmp_output); |
3566 | at::functionalization::impl::commit_update(self); |
3567 | at::functionalization::impl::sync(self); |
3568 | return self; |
3569 | } |
3570 | } |
3571 | |
3572 | at::Tensor & count_nonzero_out_dim_IntList_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) { |
3573 | if (false) { |
3574 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3575 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
3577 | auto self_meta = to_meta(self); |
3578 | auto out_meta = to_meta(out); |
3579 | at::AutoDispatchSkipFunctionalize func_guard; |
3580 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3581 | at::_ops::count_nonzero_dim_IntList_out::call(self_meta, dim, out_meta); |
3582 | } |
3583 | |
3584 | at::Tensor self_; |
3585 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3586 | at::functionalization::impl::sync(self); |
3587 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3588 | } else { |
3589 | self_ = self; |
3590 | } |
3591 | |
3592 | at::Tensor out_; |
3593 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
3594 | at::functionalization::impl::sync(out); |
3595 | out_ = at::functionalization::impl::from_functional_tensor(out); |
3596 | } else { |
3597 | out_ = out; |
3598 | } |
3599 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
3600 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3605 | } else { |
3606 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3607 | at::AutoDispatchSkipFunctionalize guard; |
3608 | at::Tensor tmp_output = at::_ops::count_nonzero_dim_IntList_out::call(self_, dim, out_); |
      return out;
3610 | } |
3611 | } else { |
3612 | at::Tensor tmp_output; |
3613 | { |
3614 | at::AutoDispatchSkipFunctionalize guard; |
3615 | tmp_output = at::_ops::count_nonzero_dim_IntList::call(self_, dim); |
3616 | } |
3617 | at::functionalization::impl::replace_(out, tmp_output); |
3618 | at::functionalization::impl::commit_update(out); |
3619 | at::functionalization::impl::sync(out); |
3620 | return out; |
3621 | } |
3622 | } |
3623 | |
3624 | at::Tensor & count_nonzero_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dim, at::Tensor & out) { |
3625 | if (false) { |
3626 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3627 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
3629 | auto self_meta = to_meta(self); |
3630 | auto out_meta = to_meta(out); |
3631 | at::AutoDispatchSkipFunctionalize func_guard; |
3632 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3633 | at::_ops::count_nonzero_out::call(self_meta, dim, out_meta); |
3634 | } |
3635 | |
3636 | at::Tensor self_; |
3637 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3638 | at::functionalization::impl::sync(self); |
3639 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3640 | } else { |
3641 | self_ = self; |
3642 | } |
3643 | |
3644 | at::Tensor out_; |
3645 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
3646 | at::functionalization::impl::sync(out); |
3647 | out_ = at::functionalization::impl::from_functional_tensor(out); |
3648 | } else { |
3649 | out_ = out; |
3650 | } |
3651 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
3652 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3657 | } else { |
3658 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3659 | at::AutoDispatchSkipFunctionalize guard; |
3660 | at::Tensor tmp_output = at::_ops::count_nonzero_out::call(self_, dim, out_); |
      return out;
3662 | } |
3663 | } else { |
3664 | at::Tensor tmp_output; |
3665 | { |
3666 | at::AutoDispatchSkipFunctionalize guard; |
3667 | tmp_output = at::_ops::count_nonzero::call(self_, dim); |
3668 | } |
3669 | at::functionalization::impl::replace_(out, tmp_output); |
3670 | at::functionalization::impl::commit_update(out); |
3671 | at::functionalization::impl::sync(out); |
3672 | return out; |
3673 | } |
3674 | } |
3675 | |
3676 | at::Tensor & cudnn_affine_grid_generator_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out) { |
3677 | if (false) { |
3678 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3679 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
3681 | auto grad_meta = to_meta(grad); |
3682 | auto out_meta = to_meta(out); |
3683 | at::AutoDispatchSkipFunctionalize func_guard; |
3684 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3685 | at::_ops::cudnn_affine_grid_generator_backward_out::call(grad_meta, N, C, H, W, out_meta); |
3686 | } |
3687 | |
3688 | at::Tensor grad_; |
3689 | if (at::functionalization::impl::isFunctionalTensor(grad)) { |
3690 | at::functionalization::impl::sync(grad); |
3691 | grad_ = at::functionalization::impl::from_functional_tensor(grad); |
3692 | } else { |
3693 | grad_ = grad; |
3694 | } |
3695 | |
3696 | at::Tensor out_; |
3697 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
3698 | at::functionalization::impl::sync(out); |
3699 | out_ = at::functionalization::impl::from_functional_tensor(out); |
3700 | } else { |
3701 | out_ = out; |
3702 | } |
3703 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
3704 | if ((false || at::functionalization::impl::isFunctionalTensor(grad))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3709 | } else { |
3710 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3711 | at::AutoDispatchSkipFunctionalize guard; |
3712 | at::Tensor tmp_output = at::_ops::cudnn_affine_grid_generator_backward_out::call(grad_, N, C, H, W, out_); |
      return out;
3714 | } |
3715 | } else { |
3716 | at::Tensor tmp_output; |
3717 | { |
3718 | at::AutoDispatchSkipFunctionalize guard; |
3719 | tmp_output = at::_ops::cudnn_affine_grid_generator_backward::call(grad_, N, C, H, W); |
3720 | } |
3721 | at::functionalization::impl::replace_(out, tmp_output); |
3722 | at::functionalization::impl::commit_update(out); |
3723 | at::functionalization::impl::sync(out); |
3724 | return out; |
3725 | } |
3726 | } |
3727 | |
3728 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) { |
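  // Multi-output variant: each out argument (and each c10::optional<at::Tensor>
  // input such as bias/running_mean/running_var) is unwrapped individually.
  // All four outputs must be functional tensors together; on the functional
  // path each element of the result tuple is committed back into its
  // corresponding out tensor before the tuple of references is returned.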
3729 | if (false) { |
3730 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3731 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
3733 | auto input_meta = to_meta(input); |
3734 | auto weight_meta = to_meta(weight); |
3735 | auto bias_meta = to_meta(bias); |
3736 | auto running_mean_meta = to_meta(running_mean); |
3737 | auto running_var_meta = to_meta(running_var); |
3738 | auto out0_meta = to_meta(out0); |
3739 | auto out1_meta = to_meta(out1); |
3740 | auto out2_meta = to_meta(out2); |
3741 | auto out3_meta = to_meta(out3); |
3742 | at::AutoDispatchSkipFunctionalize func_guard; |
3743 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3744 | at::_ops::cudnn_batch_norm_out::call(input_meta, weight_meta, bias_meta, running_mean_meta, running_var_meta, training, exponential_average_factor, epsilon, out0_meta, out1_meta, out2_meta, out3_meta); |
3745 | } |
3746 | |
3747 | at::Tensor input_; |
3748 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
3749 | at::functionalization::impl::sync(input); |
3750 | input_ = at::functionalization::impl::from_functional_tensor(input); |
3751 | } else { |
3752 | input_ = input; |
3753 | } |
3754 | |
3755 | at::Tensor weight_; |
3756 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
3757 | at::functionalization::impl::sync(weight); |
3758 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
3759 | } else { |
3760 | weight_ = weight; |
3761 | } |
3762 | |
3763 | c10::optional<at::Tensor> bias_; |
3764 | if (at::functionalization::impl::isFunctionalTensor(bias)) { |
3765 | at::functionalization::impl::sync(bias); |
3766 | bias_ = at::functionalization::impl::from_functional_tensor(bias); |
3767 | } else { |
3768 | bias_ = bias; |
3769 | } |
3770 | |
3771 | c10::optional<at::Tensor> running_mean_; |
3772 | if (at::functionalization::impl::isFunctionalTensor(running_mean)) { |
3773 | at::functionalization::impl::sync(running_mean); |
3774 | running_mean_ = at::functionalization::impl::from_functional_tensor(running_mean); |
3775 | } else { |
3776 | running_mean_ = running_mean; |
3777 | } |
3778 | |
3779 | c10::optional<at::Tensor> running_var_; |
3780 | if (at::functionalization::impl::isFunctionalTensor(running_var)) { |
3781 | at::functionalization::impl::sync(running_var); |
3782 | running_var_ = at::functionalization::impl::from_functional_tensor(running_var); |
3783 | } else { |
3784 | running_var_ = running_var; |
3785 | } |
3786 | |
3787 | at::Tensor out0_; |
3788 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
3789 | at::functionalization::impl::sync(out0); |
3790 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
3791 | } else { |
3792 | out0_ = out0; |
3793 | } |
3794 | |
3795 | at::Tensor out1_; |
3796 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
3797 | at::functionalization::impl::sync(out1); |
3798 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
3799 | } else { |
3800 | out1_ = out1; |
3801 | } |
3802 | |
3803 | at::Tensor out2_; |
3804 | if (at::functionalization::impl::isFunctionalTensor(out2)) { |
3805 | at::functionalization::impl::sync(out2); |
3806 | out2_ = at::functionalization::impl::from_functional_tensor(out2); |
3807 | } else { |
3808 | out2_ = out2; |
3809 | } |
3810 | |
3811 | at::Tensor out3_; |
3812 | if (at::functionalization::impl::isFunctionalTensor(out3)) { |
3813 | at::functionalization::impl::sync(out3); |
3814 | out3_ = at::functionalization::impl::from_functional_tensor(out3); |
3815 | } else { |
3816 | out3_ = out3; |
3817 | } |
3818 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2) && at::functionalization::impl::isFunctionalTensor(out3))) { |
3819 | if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias) || at::functionalization::impl::isFunctionalTensor(running_mean) || at::functionalization::impl::isFunctionalTensor(running_var))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3824 | } else { |
3825 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3826 | at::AutoDispatchSkipFunctionalize guard; |
3827 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::cudnn_batch_norm_out::call(input_, weight_, bias_, running_mean_, running_var_, training, exponential_average_factor, epsilon, out0_, out1_, out2_, out3_); |
      return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3);
3829 | } |
3830 | } else { |
3831 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output; |
3832 | { |
3833 | at::AutoDispatchSkipFunctionalize guard; |
3834 | tmp_output = at::_ops::cudnn_batch_norm::call(input_, weight_, bias_, running_mean_, running_var_, training, exponential_average_factor, epsilon); |
3835 | } |
3836 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
3837 | at::functionalization::impl::commit_update(out0); |
3838 | at::functionalization::impl::sync(out0); |
3839 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
3840 | at::functionalization::impl::commit_update(out1); |
3841 | at::functionalization::impl::sync(out1); |
3842 | at::functionalization::impl::replace_(out2, std::get<2>(tmp_output)); |
3843 | at::functionalization::impl::commit_update(out2); |
3844 | at::functionalization::impl::sync(out2); |
3845 | at::functionalization::impl::replace_(out3, std::get<3>(tmp_output)); |
3846 | at::functionalization::impl::commit_update(out3); |
3847 | at::functionalization::impl::sync(out3); |
3848 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3); |
3849 | } |
3850 | } |
3851 | |
3852 | ::std::tuple<at::Tensor &,at::Tensor &> mps_convolution_transpose_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) { |
3853 | if (false) { |
3854 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3855 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
3857 | auto self_meta = to_meta(self); |
3858 | auto grad_output_meta = to_meta(grad_output); |
3859 | auto weight_meta = to_meta(weight); |
3860 | auto out0_meta = to_meta(out0); |
3861 | auto out1_meta = to_meta(out1); |
3862 | at::AutoDispatchSkipFunctionalize func_guard; |
3863 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3864 | at::_ops::mps_convolution_transpose_backward_out::call(self_meta, grad_output_meta, weight_meta, padding, output_padding, stride, dilation, groups, output_mask, out0_meta, out1_meta); |
3865 | } |
3866 | |
3867 | at::Tensor self_; |
3868 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3869 | at::functionalization::impl::sync(self); |
3870 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3871 | } else { |
3872 | self_ = self; |
3873 | } |
3874 | |
3875 | at::Tensor grad_output_; |
3876 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
3877 | at::functionalization::impl::sync(grad_output); |
3878 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
3879 | } else { |
3880 | grad_output_ = grad_output; |
3881 | } |
3882 | |
3883 | at::Tensor weight_; |
3884 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
3885 | at::functionalization::impl::sync(weight); |
3886 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
3887 | } else { |
3888 | weight_ = weight; |
3889 | } |
3890 | |
3891 | at::Tensor out0_; |
3892 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
3893 | at::functionalization::impl::sync(out0); |
3894 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
3895 | } else { |
3896 | out0_ = out0; |
3897 | } |
3898 | |
3899 | at::Tensor out1_; |
3900 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
3901 | at::functionalization::impl::sync(out1); |
3902 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
3903 | } else { |
3904 | out1_ = out1; |
3905 | } |
3906 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) { |
3907 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(weight))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3912 | } else { |
3913 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3914 | at::AutoDispatchSkipFunctionalize guard; |
3915 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::mps_convolution_transpose_backward_out::call(self_, grad_output_, weight_, padding, output_padding, stride, dilation, groups, output_mask, out0_, out1_); |
      return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
3917 | } |
3918 | } else { |
3919 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
3920 | { |
3921 | at::AutoDispatchSkipFunctionalize guard; |
3922 | tmp_output = at::_ops::mps_convolution_transpose_backward::call(self_, grad_output_, weight_, padding, output_padding, stride, dilation, groups, output_mask); |
3923 | } |
3924 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
3925 | at::functionalization::impl::commit_update(out0); |
3926 | at::functionalization::impl::sync(out0); |
3927 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
3928 | at::functionalization::impl::commit_update(out1); |
3929 | at::functionalization::impl::sync(out1); |
3930 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
3931 | } |
3932 | } |
3933 | |
3934 | ::std::tuple<at::Tensor &,at::Tensor &> cudnn_grid_sampler_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output, at::Tensor & out0, at::Tensor & out1) { |
3935 | if (false) { |
3936 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3937 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
3939 | auto self_meta = to_meta(self); |
3940 | auto grid_meta = to_meta(grid); |
3941 | auto grad_output_meta = to_meta(grad_output); |
3942 | auto out0_meta = to_meta(out0); |
3943 | auto out1_meta = to_meta(out1); |
3944 | at::AutoDispatchSkipFunctionalize func_guard; |
3945 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3946 | at::_ops::cudnn_grid_sampler_backward_out::call(self_meta, grid_meta, grad_output_meta, out0_meta, out1_meta); |
3947 | } |
3948 | |
3949 | at::Tensor self_; |
3950 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3951 | at::functionalization::impl::sync(self); |
3952 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3953 | } else { |
3954 | self_ = self; |
3955 | } |
3956 | |
3957 | at::Tensor grid_; |
3958 | if (at::functionalization::impl::isFunctionalTensor(grid)) { |
3959 | at::functionalization::impl::sync(grid); |
3960 | grid_ = at::functionalization::impl::from_functional_tensor(grid); |
3961 | } else { |
3962 | grid_ = grid; |
3963 | } |
3964 | |
3965 | at::Tensor grad_output_; |
3966 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
3967 | at::functionalization::impl::sync(grad_output); |
3968 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
3969 | } else { |
3970 | grad_output_ = grad_output; |
3971 | } |
3972 | |
3973 | at::Tensor out0_; |
3974 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
3975 | at::functionalization::impl::sync(out0); |
3976 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
3977 | } else { |
3978 | out0_ = out0; |
3979 | } |
3980 | |
3981 | at::Tensor out1_; |
3982 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
3983 | at::functionalization::impl::sync(out1); |
3984 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
3985 | } else { |
3986 | out1_ = out1; |
3987 | } |
3988 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) { |
3989 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(grid) || at::functionalization::impl::isFunctionalTensor(grad_output))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3994 | } else { |
3995 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3996 | at::AutoDispatchSkipFunctionalize guard; |
3997 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::cudnn_grid_sampler_backward_out::call(self_, grid_, grad_output_, out0_, out1_); |
      return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
3999 | } |
4000 | } else { |
4001 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
4002 | { |
4003 | at::AutoDispatchSkipFunctionalize guard; |
4004 | tmp_output = at::_ops::cudnn_grid_sampler_backward::call(self_, grid_, grad_output_); |
4005 | } |
4006 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
4007 | at::functionalization::impl::commit_update(out0); |
4008 | at::functionalization::impl::sync(out0); |
4009 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
4010 | at::functionalization::impl::commit_update(out1); |
4011 | at::functionalization::impl::sync(out1); |
4012 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
4013 | } |
4014 | } |
4015 | |
4016 | ::std::tuple<at::Tensor &,at::Tensor &> cummin_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices) { |
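  // Ops whose schema names its outputs (`values`/`indices` here) use the same
  // multi-output template as above; only the variable names differ from
  // out0/out1.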
4017 | if (false) { |
4018 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4019 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
4021 | auto self_meta = to_meta(self); |
4022 | auto values_meta = to_meta(values); |
4023 | auto indices_meta = to_meta(indices); |
4024 | at::AutoDispatchSkipFunctionalize func_guard; |
4025 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4026 | at::_ops::cummin_out::call(self_meta, dim, values_meta, indices_meta); |
4027 | } |
4028 | |
4029 | at::Tensor self_; |
4030 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4031 | at::functionalization::impl::sync(self); |
4032 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4033 | } else { |
4034 | self_ = self; |
4035 | } |
4036 | |
4037 | at::Tensor values_; |
4038 | if (at::functionalization::impl::isFunctionalTensor(values)) { |
4039 | at::functionalization::impl::sync(values); |
4040 | values_ = at::functionalization::impl::from_functional_tensor(values); |
4041 | } else { |
4042 | values_ = values; |
4043 | } |
4044 | |
4045 | at::Tensor indices_; |
4046 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
4047 | at::functionalization::impl::sync(indices); |
4048 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
4049 | } else { |
4050 | indices_ = indices; |
4051 | } |
4052 | if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) { |
4053 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4058 | } else { |
4059 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4060 | at::AutoDispatchSkipFunctionalize guard; |
4061 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::cummin_out::call(self_, dim, values_, indices_); |
      return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
4063 | } |
4064 | } else { |
4065 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
4066 | { |
4067 | at::AutoDispatchSkipFunctionalize guard; |
4068 | tmp_output = at::_ops::cummin::call(self_, dim); |
4069 | } |
4070 | at::functionalization::impl::replace_(values, std::get<0>(tmp_output)); |
4071 | at::functionalization::impl::commit_update(values); |
4072 | at::functionalization::impl::sync(values); |
4073 | at::functionalization::impl::replace_(indices, std::get<1>(tmp_output)); |
4074 | at::functionalization::impl::commit_update(indices); |
4075 | at::functionalization::impl::sync(indices); |
4076 | return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices); |
4077 | } |
4078 | } |
4079 | |
4080 | ::std::tuple<at::Tensor &,at::Tensor &> cummin_out_dimname_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices) { |
4081 | if (false) { |
4082 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4083 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
4085 | auto self_meta = to_meta(self); |
4086 | auto values_meta = to_meta(values); |
4087 | auto indices_meta = to_meta(indices); |
4088 | at::AutoDispatchSkipFunctionalize func_guard; |
4089 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4090 | at::_ops::cummin_dimname_out::call(self_meta, dim, values_meta, indices_meta); |
4091 | } |
4092 | |
4093 | at::Tensor self_; |
4094 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4095 | at::functionalization::impl::sync(self); |
4096 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4097 | } else { |
4098 | self_ = self; |
4099 | } |
4100 | |
4101 | at::Tensor values_; |
4102 | if (at::functionalization::impl::isFunctionalTensor(values)) { |
4103 | at::functionalization::impl::sync(values); |
4104 | values_ = at::functionalization::impl::from_functional_tensor(values); |
4105 | } else { |
4106 | values_ = values; |
4107 | } |
4108 | |
4109 | at::Tensor indices_; |
4110 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
4111 | at::functionalization::impl::sync(indices); |
4112 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
4113 | } else { |
4114 | indices_ = indices; |
4115 | } |
4116 | if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) { |
4117 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4122 | } else { |
4123 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4124 | at::AutoDispatchSkipFunctionalize guard; |
4125 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::cummin_dimname_out::call(self_, dim, values_, indices_); |
      return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
4127 | } |
4128 | } else { |
4129 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
4130 | { |
4131 | at::AutoDispatchSkipFunctionalize guard; |
4132 | tmp_output = at::_ops::cummin_dimname::call(self_, dim); |
4133 | } |
4134 | at::functionalization::impl::replace_(values, std::get<0>(tmp_output)); |
4135 | at::functionalization::impl::commit_update(values); |
4136 | at::functionalization::impl::sync(values); |
4137 | at::functionalization::impl::replace_(indices, std::get<1>(tmp_output)); |
4138 | at::functionalization::impl::commit_update(indices); |
4139 | at::functionalization::impl::sync(indices); |
4140 | return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices); |
4141 | } |
4142 | } |
4143 | |
4144 | at::Tensor & cumsum_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) { |
4145 | if (false) { |
4146 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4147 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
4149 | auto self_meta = to_meta(self); |
4150 | auto out_meta = to_meta(out); |
4151 | at::AutoDispatchSkipFunctionalize func_guard; |
4152 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4153 | at::_ops::cumsum_out::call(self_meta, dim, dtype, out_meta); |
4154 | } |
4155 | |
4156 | at::Tensor self_; |
4157 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4158 | at::functionalization::impl::sync(self); |
4159 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4160 | } else { |
4161 | self_ = self; |
4162 | } |
4163 | |
4164 | at::Tensor out_; |
4165 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4166 | at::functionalization::impl::sync(out); |
4167 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4168 | } else { |
4169 | out_ = out; |
4170 | } |
4171 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4172 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4177 | } else { |
4178 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4179 | at::AutoDispatchSkipFunctionalize guard; |
4180 | at::Tensor tmp_output = at::_ops::cumsum_out::call(self_, dim, dtype, out_); |
      return out;
4182 | } |
4183 | } else { |
4184 | at::Tensor tmp_output; |
4185 | { |
4186 | at::AutoDispatchSkipFunctionalize guard; |
4187 | tmp_output = at::_ops::cumsum::call(self_, dim, dtype); |
4188 | } |
4189 | at::functionalization::impl::replace_(out, tmp_output); |
4190 | at::functionalization::impl::commit_update(out); |
4191 | at::functionalization::impl::sync(out); |
4192 | return out; |
4193 | } |
4194 | } |
4195 | |
4196 | at::Tensor & cumsum_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) { |
4197 | if (true) { |
4198 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4199 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
4201 | auto self_meta = to_meta(self); |
4202 | at::AutoDispatchSkipFunctionalize func_guard; |
4203 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4204 | at::_ops::cumsum_::call(self_meta, dim, dtype); |
4205 | } |
4206 | |
4207 | at::Tensor self_; |
4208 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4209 | at::functionalization::impl::sync(self); |
4210 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4211 | } else { |
4212 | self_ = self; |
4213 | } |
4214 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
4215 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4220 | } else { |
4221 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4222 | at::AutoDispatchSkipFunctionalize guard; |
4223 | at::Tensor tmp_output = at::_ops::cumsum_::call(self_, dim, dtype); |
      return self;
4225 | } |
4226 | } else { |
4227 | at::Tensor tmp_output; |
4228 | { |
4229 | at::AutoDispatchSkipFunctionalize guard; |
4230 | tmp_output = at::_ops::cumsum::call(self_, dim, dtype); |
4231 | } |
4232 | at::functionalization::impl::replace_(self, tmp_output); |
4233 | at::functionalization::impl::commit_update(self); |
4234 | at::functionalization::impl::sync(self); |
4235 | return self; |
4236 | } |
4237 | } |
4238 | |
4239 | at::Tensor & cumsum_out_dimname_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) { |
4240 | if (false) { |
4241 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4242 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
4244 | auto self_meta = to_meta(self); |
4245 | auto out_meta = to_meta(out); |
4246 | at::AutoDispatchSkipFunctionalize func_guard; |
4247 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4248 | at::_ops::cumsum_dimname_out::call(self_meta, dim, dtype, out_meta); |
4249 | } |
4250 | |
4251 | at::Tensor self_; |
4252 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4253 | at::functionalization::impl::sync(self); |
4254 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4255 | } else { |
4256 | self_ = self; |
4257 | } |
4258 | |
4259 | at::Tensor out_; |
4260 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4261 | at::functionalization::impl::sync(out); |
4262 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4263 | } else { |
4264 | out_ = out; |
4265 | } |
4266 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4267 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4272 | } else { |
4273 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4274 | at::AutoDispatchSkipFunctionalize guard; |
4275 | at::Tensor tmp_output = at::_ops::cumsum_dimname_out::call(self_, dim, dtype, out_); |
      return out;
4277 | } |
4278 | } else { |
4279 | at::Tensor tmp_output; |
4280 | { |
4281 | at::AutoDispatchSkipFunctionalize guard; |
4282 | tmp_output = at::_ops::cumsum_dimname::call(self_, dim, dtype); |
4283 | } |
4284 | at::functionalization::impl::replace_(out, tmp_output); |
4285 | at::functionalization::impl::commit_update(out); |
4286 | at::functionalization::impl::sync(out); |
4287 | return out; |
4288 | } |
4289 | } |
4290 | |
4291 | at::Tensor & cumsum__dimname(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) { |
4292 | if (true) { |
4293 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4294 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
4296 | auto self_meta = to_meta(self); |
4297 | at::AutoDispatchSkipFunctionalize func_guard; |
4298 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4299 | at::_ops::cumsum__dimname::call(self_meta, dim, dtype); |
4300 | } |
4301 | |
4302 | at::Tensor self_; |
4303 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4304 | at::functionalization::impl::sync(self); |
4305 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4306 | } else { |
4307 | self_ = self; |
4308 | } |
4309 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
4310 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4315 | } else { |
4316 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4317 | at::AutoDispatchSkipFunctionalize guard; |
4318 | at::Tensor tmp_output = at::_ops::cumsum__dimname::call(self_, dim, dtype); |
      return self;
4320 | } |
4321 | } else { |
4322 | at::Tensor tmp_output; |
4323 | { |
4324 | at::AutoDispatchSkipFunctionalize guard; |
4325 | tmp_output = at::_ops::cumsum_dimname::call(self_, dim, dtype); |
4326 | } |
4327 | at::functionalization::impl::replace_(self, tmp_output); |
4328 | at::functionalization::impl::commit_update(self); |
4329 | at::functionalization::impl::sync(self); |
4330 | return self; |
4331 | } |
4332 | } |
4333 | |
4334 | ::std::tuple<at::Tensor &,at::Tensor &> _ctc_loss_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) { |
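  // _ctc_loss has two out= overloads: this one takes the input/target lengths
  // as at::IntArrayRef (passed through unchanged), while the Tensor_out
  // overload below unwraps the length tensors like any other tensor argument.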
4335 | if (false) { |
4336 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4337 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
4339 | auto log_probs_meta = to_meta(log_probs); |
4340 | auto targets_meta = to_meta(targets); |
4341 | auto out0_meta = to_meta(out0); |
4342 | auto out1_meta = to_meta(out1); |
4343 | at::AutoDispatchSkipFunctionalize func_guard; |
4344 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4345 | at::_ops::_ctc_loss_out::call(log_probs_meta, targets_meta, input_lengths, target_lengths, blank, zero_infinity, out0_meta, out1_meta); |
4346 | } |
4347 | |
4348 | at::Tensor log_probs_; |
4349 | if (at::functionalization::impl::isFunctionalTensor(log_probs)) { |
4350 | at::functionalization::impl::sync(log_probs); |
4351 | log_probs_ = at::functionalization::impl::from_functional_tensor(log_probs); |
4352 | } else { |
4353 | log_probs_ = log_probs; |
4354 | } |
4355 | |
4356 | at::Tensor targets_; |
4357 | if (at::functionalization::impl::isFunctionalTensor(targets)) { |
4358 | at::functionalization::impl::sync(targets); |
4359 | targets_ = at::functionalization::impl::from_functional_tensor(targets); |
4360 | } else { |
4361 | targets_ = targets; |
4362 | } |
4363 | |
4364 | at::Tensor out0_; |
4365 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
4366 | at::functionalization::impl::sync(out0); |
4367 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
4368 | } else { |
4369 | out0_ = out0; |
4370 | } |
4371 | |
4372 | at::Tensor out1_; |
4373 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
4374 | at::functionalization::impl::sync(out1); |
4375 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
4376 | } else { |
4377 | out1_ = out1; |
4378 | } |
4379 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) { |
4380 | if ((false || at::functionalization::impl::isFunctionalTensor(log_probs) || at::functionalization::impl::isFunctionalTensor(targets))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4385 | } else { |
4386 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4387 | at::AutoDispatchSkipFunctionalize guard; |
4388 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_ctc_loss_out::call(log_probs_, targets_, input_lengths, target_lengths, blank, zero_infinity, out0_, out1_); |
      return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
4390 | } |
4391 | } else { |
4392 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
4393 | { |
4394 | at::AutoDispatchSkipFunctionalize guard; |
4395 | tmp_output = at::_ops::_ctc_loss::call(log_probs_, targets_, input_lengths, target_lengths, blank, zero_infinity); |
4396 | } |
4397 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
4398 | at::functionalization::impl::commit_update(out0); |
4399 | at::functionalization::impl::sync(out0); |
4400 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
4401 | at::functionalization::impl::commit_update(out1); |
4402 | at::functionalization::impl::sync(out1); |
4403 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
4404 | } |
4405 | } |
4406 | |
4407 | ::std::tuple<at::Tensor &,at::Tensor &> _ctc_loss_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) { |
4408 | if (false) { |
4409 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4410 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
4412 | auto log_probs_meta = to_meta(log_probs); |
4413 | auto targets_meta = to_meta(targets); |
4414 | auto input_lengths_meta = to_meta(input_lengths); |
4415 | auto target_lengths_meta = to_meta(target_lengths); |
4416 | auto out0_meta = to_meta(out0); |
4417 | auto out1_meta = to_meta(out1); |
4418 | at::AutoDispatchSkipFunctionalize func_guard; |
4419 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4420 | at::_ops::_ctc_loss_Tensor_out::call(log_probs_meta, targets_meta, input_lengths_meta, target_lengths_meta, blank, zero_infinity, out0_meta, out1_meta); |
4421 | } |
4422 | |
4423 | at::Tensor log_probs_; |
4424 | if (at::functionalization::impl::isFunctionalTensor(log_probs)) { |
4425 | at::functionalization::impl::sync(log_probs); |
4426 | log_probs_ = at::functionalization::impl::from_functional_tensor(log_probs); |
4427 | } else { |
4428 | log_probs_ = log_probs; |
4429 | } |
4430 | |
4431 | at::Tensor targets_; |
4432 | if (at::functionalization::impl::isFunctionalTensor(targets)) { |
4433 | at::functionalization::impl::sync(targets); |
4434 | targets_ = at::functionalization::impl::from_functional_tensor(targets); |
4435 | } else { |
4436 | targets_ = targets; |
4437 | } |
4438 | |
4439 | at::Tensor input_lengths_; |
4440 | if (at::functionalization::impl::isFunctionalTensor(input_lengths)) { |
4441 | at::functionalization::impl::sync(input_lengths); |
4442 | input_lengths_ = at::functionalization::impl::from_functional_tensor(input_lengths); |
4443 | } else { |
4444 | input_lengths_ = input_lengths; |
4445 | } |
4446 | |
4447 | at::Tensor target_lengths_; |
4448 | if (at::functionalization::impl::isFunctionalTensor(target_lengths)) { |
4449 | at::functionalization::impl::sync(target_lengths); |
4450 | target_lengths_ = at::functionalization::impl::from_functional_tensor(target_lengths); |
4451 | } else { |
4452 | target_lengths_ = target_lengths; |
4453 | } |
4454 | |
4455 | at::Tensor out0_; |
4456 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
4457 | at::functionalization::impl::sync(out0); |
4458 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
4459 | } else { |
4460 | out0_ = out0; |
4461 | } |
4462 | |
4463 | at::Tensor out1_; |
4464 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
4465 | at::functionalization::impl::sync(out1); |
4466 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
4467 | } else { |
4468 | out1_ = out1; |
4469 | } |
4470 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) { |
4471 | if ((false || at::functionalization::impl::isFunctionalTensor(log_probs) || at::functionalization::impl::isFunctionalTensor(targets) || at::functionalization::impl::isFunctionalTensor(input_lengths) || at::functionalization::impl::isFunctionalTensor(target_lengths))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4476 | } else { |
4477 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4478 | at::AutoDispatchSkipFunctionalize guard; |
4479 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_ctc_loss_Tensor_out::call(log_probs_, targets_, input_lengths_, target_lengths_, blank, zero_infinity, out0_, out1_); |
      return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
4481 | } |
4482 | } else { |
4483 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
4484 | { |
4485 | at::AutoDispatchSkipFunctionalize guard; |
4486 | tmp_output = at::_ops::_ctc_loss_Tensor::call(log_probs_, targets_, input_lengths_, target_lengths_, blank, zero_infinity); |
4487 | } |
4488 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
4489 | at::functionalization::impl::commit_update(out0); |
4490 | at::functionalization::impl::sync(out0); |
4491 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
4492 | at::functionalization::impl::commit_update(out1); |
4493 | at::functionalization::impl::sync(out1); |
4494 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
4495 | } |
4496 | } |
4497 | |
4498 | at::Tensor & _ctc_loss_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity, at::Tensor & out) { |
4499 | if (false) { |
4500 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4501 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
4503 | auto grad_meta = to_meta(grad); |
4504 | auto log_probs_meta = to_meta(log_probs); |
4505 | auto targets_meta = to_meta(targets); |
4506 | auto neg_log_likelihood_meta = to_meta(neg_log_likelihood); |
4507 | auto log_alpha_meta = to_meta(log_alpha); |
4508 | auto out_meta = to_meta(out); |
4509 | at::AutoDispatchSkipFunctionalize func_guard; |
4510 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4511 | at::_ops::_ctc_loss_backward_out::call(grad_meta, log_probs_meta, targets_meta, input_lengths, target_lengths, neg_log_likelihood_meta, log_alpha_meta, blank, zero_infinity, out_meta); |
4512 | } |
4513 | |
4514 | at::Tensor grad_; |
4515 | if (at::functionalization::impl::isFunctionalTensor(grad)) { |
4516 | at::functionalization::impl::sync(grad); |
4517 | grad_ = at::functionalization::impl::from_functional_tensor(grad); |
4518 | } else { |
4519 | grad_ = grad; |
4520 | } |
4521 | |
4522 | at::Tensor log_probs_; |
4523 | if (at::functionalization::impl::isFunctionalTensor(log_probs)) { |
4524 | at::functionalization::impl::sync(log_probs); |
4525 | log_probs_ = at::functionalization::impl::from_functional_tensor(log_probs); |
4526 | } else { |
4527 | log_probs_ = log_probs; |
4528 | } |
4529 | |
4530 | at::Tensor targets_; |
4531 | if (at::functionalization::impl::isFunctionalTensor(targets)) { |
4532 | at::functionalization::impl::sync(targets); |
4533 | targets_ = at::functionalization::impl::from_functional_tensor(targets); |
4534 | } else { |
4535 | targets_ = targets; |
4536 | } |
4537 | |
4538 | at::Tensor neg_log_likelihood_; |
4539 | if (at::functionalization::impl::isFunctionalTensor(neg_log_likelihood)) { |
4540 | at::functionalization::impl::sync(neg_log_likelihood); |
4541 | neg_log_likelihood_ = at::functionalization::impl::from_functional_tensor(neg_log_likelihood); |
4542 | } else { |
4543 | neg_log_likelihood_ = neg_log_likelihood; |
4544 | } |
4545 | |
4546 | at::Tensor log_alpha_; |
4547 | if (at::functionalization::impl::isFunctionalTensor(log_alpha)) { |
4548 | at::functionalization::impl::sync(log_alpha); |
4549 | log_alpha_ = at::functionalization::impl::from_functional_tensor(log_alpha); |
4550 | } else { |
4551 | log_alpha_ = log_alpha; |
4552 | } |
4553 | |
4554 | at::Tensor out_; |
4555 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4556 | at::functionalization::impl::sync(out); |
4557 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4558 | } else { |
4559 | out_ = out; |
4560 | } |
4561 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4562 | if ((false || at::functionalization::impl::isFunctionalTensor(grad) || at::functionalization::impl::isFunctionalTensor(log_probs) || at::functionalization::impl::isFunctionalTensor(targets) || at::functionalization::impl::isFunctionalTensor(neg_log_likelihood) || at::functionalization::impl::isFunctionalTensor(log_alpha))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4567 | } else { |
4568 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4569 | at::AutoDispatchSkipFunctionalize guard; |
      at::_ops::_ctc_loss_backward_out::call(grad_, log_probs_, targets_, input_lengths, target_lengths, neg_log_likelihood_, log_alpha_, blank, zero_infinity, out_);
      return out;
4572 | } |
4573 | } else { |
4574 | at::Tensor tmp_output; |
4575 | { |
4576 | at::AutoDispatchSkipFunctionalize guard; |
4577 | tmp_output = at::_ops::_ctc_loss_backward::call(grad_, log_probs_, targets_, input_lengths, target_lengths, neg_log_likelihood_, log_alpha_, blank, zero_infinity); |
4578 | } |
4579 | at::functionalization::impl::replace_(out, tmp_output); |
4580 | at::functionalization::impl::commit_update(out); |
4581 | at::functionalization::impl::sync(out); |
4582 | return out; |
4583 | } |
4584 | } |
4585 | |
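// Note [The constant-folded meta pre-check]
// The `if (false)` block at the top of each out= wrapper (and `if (true)` for
// inplace wrappers such as resize_ and exp2_ below) is emitted by torchgen with a
// constant condition: the meta replay is only enabled for inplace ops, since those
// are the ops known to support meta tensors. When enabled, it reruns the call on
// meta tensors so that shape errors still surface even though the real call is
// rerouted to the functional variant. A hedged sketch of the enabled path, using
// the to_meta() helper this file already defines:
//
//   auto self_meta = to_meta(self);               // same sizes/dtype on the Meta device
//   at::AutoDispatchSkipFunctionalize func_guard; // don't re-enter this kernel
//   at::_ops::exp2_::call(self_meta);             // shape checks only, no real compute
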
4586 | at::Tensor & embedding_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse, at::Tensor & out) { |
4587 | if (false) { |
4588 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4589 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
4591 | auto weight_meta = to_meta(weight); |
4592 | auto indices_meta = to_meta(indices); |
4593 | auto out_meta = to_meta(out); |
4594 | at::AutoDispatchSkipFunctionalize func_guard; |
4595 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4596 | at::_ops::embedding_out::call(weight_meta, indices_meta, padding_idx, scale_grad_by_freq, sparse, out_meta); |
4597 | } |
4598 | |
4599 | at::Tensor weight_; |
4600 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
4601 | at::functionalization::impl::sync(weight); |
4602 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
4603 | } else { |
4604 | weight_ = weight; |
4605 | } |
4606 | |
4607 | at::Tensor indices_; |
4608 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
4609 | at::functionalization::impl::sync(indices); |
4610 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
4611 | } else { |
4612 | indices_ = indices; |
4613 | } |
4614 | |
4615 | at::Tensor out_; |
4616 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4617 | at::functionalization::impl::sync(out); |
4618 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4619 | } else { |
4620 | out_ = out; |
4621 | } |
4622 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4623 | if ((false || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(indices))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4628 | } else { |
4629 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4630 | at::AutoDispatchSkipFunctionalize guard; |
      at::_ops::embedding_out::call(weight_, indices_, padding_idx, scale_grad_by_freq, sparse, out_);
      return out;
4633 | } |
4634 | } else { |
4635 | at::Tensor tmp_output; |
4636 | { |
4637 | at::AutoDispatchSkipFunctionalize guard; |
4638 | tmp_output = at::_ops::embedding::call(weight_, indices_, padding_idx, scale_grad_by_freq, sparse); |
4639 | } |
4640 | at::functionalization::impl::replace_(out, tmp_output); |
4641 | at::functionalization::impl::commit_update(out); |
4642 | at::functionalization::impl::sync(out); |
4643 | return out; |
4644 | } |
4645 | } |
4646 | |
4647 | at::Tensor & embedding_dense_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, at::Tensor & out) { |
4648 | if (false) { |
4649 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4650 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
4652 | auto grad_output_meta = to_meta(grad_output); |
4653 | auto indices_meta = to_meta(indices); |
4654 | auto out_meta = to_meta(out); |
4655 | at::AutoDispatchSkipFunctionalize func_guard; |
4656 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4657 | at::_ops::embedding_dense_backward_out::call(grad_output_meta, indices_meta, num_weights, padding_idx, scale_grad_by_freq, out_meta); |
4658 | } |
4659 | |
4660 | at::Tensor grad_output_; |
4661 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
4662 | at::functionalization::impl::sync(grad_output); |
4663 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
4664 | } else { |
4665 | grad_output_ = grad_output; |
4666 | } |
4667 | |
4668 | at::Tensor indices_; |
4669 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
4670 | at::functionalization::impl::sync(indices); |
4671 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
4672 | } else { |
4673 | indices_ = indices; |
4674 | } |
4675 | |
4676 | at::Tensor out_; |
4677 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4678 | at::functionalization::impl::sync(out); |
4679 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4680 | } else { |
4681 | out_ = out; |
4682 | } |
4683 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4684 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(indices))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4689 | } else { |
4690 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4691 | at::AutoDispatchSkipFunctionalize guard; |
      at::_ops::embedding_dense_backward_out::call(grad_output_, indices_, num_weights, padding_idx, scale_grad_by_freq, out_);
      return out;
4694 | } |
4695 | } else { |
4696 | at::Tensor tmp_output; |
4697 | { |
4698 | at::AutoDispatchSkipFunctionalize guard; |
4699 | tmp_output = at::_ops::embedding_dense_backward::call(grad_output_, indices_, num_weights, padding_idx, scale_grad_by_freq); |
4700 | } |
4701 | at::functionalization::impl::replace_(out, tmp_output); |
4702 | at::functionalization::impl::commit_update(out); |
4703 | at::functionalization::impl::sync(out); |
4704 | return out; |
4705 | } |
4706 | } |
4707 | |
4708 | at::Tensor & new_zeros_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) { |
4709 | if (false) { |
4710 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4711 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
4713 | auto self_meta = to_meta(self); |
4714 | auto out_meta = to_meta(out); |
4715 | at::AutoDispatchSkipFunctionalize func_guard; |
4716 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4717 | at::_ops::new_zeros_out::call(self_meta, size, out_meta); |
4718 | } |
4719 | |
4720 | at::Tensor self_; |
4721 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4722 | at::functionalization::impl::sync(self); |
4723 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4724 | } else { |
4725 | self_ = self; |
4726 | } |
4727 | |
4728 | at::Tensor out_; |
4729 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4730 | at::functionalization::impl::sync(out); |
4731 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4732 | } else { |
4733 | out_ = out; |
4734 | } |
4735 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4736 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4741 | } else { |
4742 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4743 | at::AutoDispatchSkipFunctionalize guard; |
      at::_ops::new_zeros_out::call(self_, size, out_);
      return out;
4746 | } |
4747 | } else { |
4748 | at::Tensor tmp_output; |
4749 | { |
4750 | at::AutoDispatchSkipFunctionalize guard; |
4751 | tmp_output = at::_ops::new_zeros::call(self_, size, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
4752 | } |
4753 | at::functionalization::impl::replace_(out, tmp_output); |
4754 | at::functionalization::impl::commit_update(out); |
4755 | at::functionalization::impl::sync(out); |
4756 | return out; |
4757 | } |
4758 | } |
4759 | |
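// Note [Factory ops rebuild TensorOptions from out]
// new_zeros (above), new_ones and _empty_per_channel_affine_quantized (below) have
// functional variants that take dtype/layout/device arguments instead of an out
// tensor, so the write-back path reconstructs those from out_ itself, with
// pin_memory passed as c10::nullopt. Roughly the same thing spelled with the
// public API (illustrative only):
//
//   at::Tensor fresh = self_.new_zeros({2, 3}, out_.options());
//   // fresh now matches out_'s dtype/layout/device before replace_() commits it.
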
4760 | at::Tensor & new_ones_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) { |
4761 | if (false) { |
4762 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4763 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
4765 | auto self_meta = to_meta(self); |
4766 | auto out_meta = to_meta(out); |
4767 | at::AutoDispatchSkipFunctionalize func_guard; |
4768 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4769 | at::_ops::new_ones_out::call(self_meta, size, out_meta); |
4770 | } |
4771 | |
4772 | at::Tensor self_; |
4773 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4774 | at::functionalization::impl::sync(self); |
4775 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4776 | } else { |
4777 | self_ = self; |
4778 | } |
4779 | |
4780 | at::Tensor out_; |
4781 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4782 | at::functionalization::impl::sync(out); |
4783 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4784 | } else { |
4785 | out_ = out; |
4786 | } |
4787 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4788 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4793 | } else { |
4794 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4795 | at::AutoDispatchSkipFunctionalize guard; |
      at::_ops::new_ones_out::call(self_, size, out_);
      return out;
4798 | } |
4799 | } else { |
4800 | at::Tensor tmp_output; |
4801 | { |
4802 | at::AutoDispatchSkipFunctionalize guard; |
4803 | tmp_output = at::_ops::new_ones::call(self_, size, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
4804 | } |
4805 | at::functionalization::impl::replace_(out, tmp_output); |
4806 | at::functionalization::impl::commit_update(out); |
4807 | at::functionalization::impl::sync(out); |
4808 | return out; |
4809 | } |
4810 | } |
4811 | |
4812 | at::Tensor & _empty_per_channel_affine_quantized_out_out(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { |
4813 | if (false) { |
4814 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4815 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
4817 | auto scales_meta = to_meta(scales); |
4818 | auto zero_points_meta = to_meta(zero_points); |
4819 | auto out_meta = to_meta(out); |
4820 | at::AutoDispatchSkipFunctionalize func_guard; |
4821 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4822 | at::_ops::_empty_per_channel_affine_quantized_out::call(size, scales_meta, zero_points_meta, axis, memory_format, out_meta); |
4823 | } |
4824 | |
4825 | at::Tensor scales_; |
4826 | if (at::functionalization::impl::isFunctionalTensor(scales)) { |
4827 | at::functionalization::impl::sync(scales); |
4828 | scales_ = at::functionalization::impl::from_functional_tensor(scales); |
4829 | } else { |
4830 | scales_ = scales; |
4831 | } |
4832 | |
4833 | at::Tensor zero_points_; |
4834 | if (at::functionalization::impl::isFunctionalTensor(zero_points)) { |
4835 | at::functionalization::impl::sync(zero_points); |
4836 | zero_points_ = at::functionalization::impl::from_functional_tensor(zero_points); |
4837 | } else { |
4838 | zero_points_ = zero_points; |
4839 | } |
4840 | |
4841 | at::Tensor out_; |
4842 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4843 | at::functionalization::impl::sync(out); |
4844 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4845 | } else { |
4846 | out_ = out; |
4847 | } |
4848 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4849 | if ((false || at::functionalization::impl::isFunctionalTensor(scales) || at::functionalization::impl::isFunctionalTensor(zero_points))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4854 | } else { |
4855 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4856 | at::AutoDispatchSkipFunctionalize guard; |
      at::_ops::_empty_per_channel_affine_quantized_out::call(size, scales_, zero_points_, axis, memory_format, out_);
      return out;
4859 | } |
4860 | } else { |
4861 | at::Tensor tmp_output; |
4862 | { |
4863 | at::AutoDispatchSkipFunctionalize guard; |
4864 | tmp_output = at::_ops::_empty_per_channel_affine_quantized::call(size, scales_, zero_points_, axis, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt, memory_format); |
4865 | } |
4866 | at::functionalization::impl::replace_(out, tmp_output); |
4867 | at::functionalization::impl::commit_update(out); |
4868 | at::functionalization::impl::sync(out); |
4869 | return out; |
4870 | } |
4871 | } |
4872 | |
4873 | const at::Tensor & resize_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format, const at::Tensor & out) { |
4874 | if (false) { |
4875 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4876 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
4878 | auto self_meta = to_meta(self); |
4879 | auto out_meta = to_meta(out); |
4880 | at::AutoDispatchSkipFunctionalize func_guard; |
4881 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4882 | at::_ops::resize_out::call(self_meta, size, memory_format, out_meta); |
4883 | } |
4884 | |
4885 | at::Tensor self_; |
4886 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4887 | at::functionalization::impl::sync(self); |
4888 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4889 | } else { |
4890 | self_ = self; |
4891 | } |
4892 | |
4893 | at::Tensor out_; |
4894 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4895 | at::functionalization::impl::sync(out); |
4896 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4897 | } else { |
4898 | out_ = out; |
4899 | } |
4900 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4901 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4906 | } else { |
4907 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4908 | at::AutoDispatchSkipFunctionalize guard; |
      at::_ops::resize_out::call(self_, size, memory_format, out_);
      return out;
4911 | } |
4912 | } else { |
4913 | at::Tensor tmp_output; |
4914 | { |
4915 | at::AutoDispatchSkipFunctionalize guard; |
4916 | tmp_output = at::_ops::resize::call(self_, size, memory_format); |
4917 | } |
4918 | at::functionalization::impl::replace_(out, tmp_output); |
4919 | at::functionalization::impl::commit_update(out); |
4920 | at::functionalization::impl::sync(out); |
4921 | return out; |
4922 | } |
4923 | } |
4924 | |
4925 | const at::Tensor & resize_(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) { |
4926 | if (true) { |
4927 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4928 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
4930 | auto self_meta = to_meta(self); |
4931 | at::AutoDispatchSkipFunctionalize func_guard; |
4932 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4933 | at::_ops::resize_::call(self_meta, size, memory_format); |
4934 | } |
4935 | |
4936 | at::Tensor self_; |
4937 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4938 | at::functionalization::impl::sync(self); |
4939 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4940 | } else { |
4941 | self_ = self; |
4942 | } |
4943 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
4944 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4949 | } else { |
4950 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4951 | at::AutoDispatchSkipFunctionalize guard; |
      at::_ops::resize_::call(self_, size, memory_format);
      return self;
4954 | } |
4955 | } else { |
4956 | at::Tensor tmp_output; |
4957 | { |
4958 | at::AutoDispatchSkipFunctionalize guard; |
4959 | tmp_output = at::_ops::resize::call(self_, size, memory_format); |
4960 | } |
4961 | at::functionalization::impl::replace_(self, tmp_output); |
4962 | at::functionalization::impl::commit_update(self); |
4963 | at::functionalization::impl::sync(self); |
4964 | return self; |
4965 | } |
4966 | } |
4967 | |
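// Note [resize_ under functionalization]
// resize_ (above) is the one wrapper here that takes and returns `const at::Tensor &`,
// matching its native signature, and it is also an inplace op whose meta pre-check
// is compiled in (`if (true)`). Instead of resizing storage in place, it calls the
// functional at::_ops::resize and commits the result into self. A hedged usage
// sketch (illustrative only):
//
//   at::Tensor t = at::functionalization::impl::to_functional_tensor(at::ones({2}));
//   // with the Functionalize dispatch key active this lands in resize_ above:
//   t.resize_({4, 4});   // no in-place storage mutation; the wrapper is updated
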
4968 | at::Tensor & exp2_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
4969 | if (false) { |
4970 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4971 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
4973 | auto self_meta = to_meta(self); |
4974 | auto out_meta = to_meta(out); |
4975 | at::AutoDispatchSkipFunctionalize func_guard; |
4976 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4977 | at::_ops::exp2_out::call(self_meta, out_meta); |
4978 | } |
4979 | |
4980 | at::Tensor self_; |
4981 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4982 | at::functionalization::impl::sync(self); |
4983 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4984 | } else { |
4985 | self_ = self; |
4986 | } |
4987 | |
4988 | at::Tensor out_; |
4989 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4990 | at::functionalization::impl::sync(out); |
4991 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4992 | } else { |
4993 | out_ = out; |
4994 | } |
4995 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4996 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
5001 | } else { |
5002 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5003 | at::AutoDispatchSkipFunctionalize guard; |
      at::_ops::exp2_out::call(self_, out_);
      return out;
5006 | } |
5007 | } else { |
5008 | at::Tensor tmp_output; |
5009 | { |
5010 | at::AutoDispatchSkipFunctionalize guard; |
5011 | tmp_output = at::_ops::exp2::call(self_); |
5012 | } |
5013 | at::functionalization::impl::replace_(out, tmp_output); |
5014 | at::functionalization::impl::commit_update(out); |
5015 | at::functionalization::impl::sync(out); |
5016 | return out; |
5017 | } |
5018 | } |
5019 | |
5020 | at::Tensor & exp2_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
5021 | if (true) { |
5022 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5023 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
5025 | auto self_meta = to_meta(self); |
5026 | at::AutoDispatchSkipFunctionalize func_guard; |
5027 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5028 | at::_ops::exp2_::call(self_meta); |
5029 | } |
5030 | |
5031 | at::Tensor self_; |
5032 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5033 | at::functionalization::impl::sync(self); |
5034 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5035 | } else { |
5036 | self_ = self; |
5037 | } |
5038 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
5039 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
5044 | } else { |
5045 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5046 | at::AutoDispatchSkipFunctionalize guard; |
      at::_ops::exp2_::call(self_);
      return self;
5049 | } |
5050 | } else { |
5051 | at::Tensor tmp_output; |
5052 | { |
5053 | at::AutoDispatchSkipFunctionalize guard; |
5054 | tmp_output = at::_ops::exp2::call(self_); |
5055 | } |
5056 | at::functionalization::impl::replace_(self, tmp_output); |
5057 | at::functionalization::impl::commit_update(self); |
5058 | at::functionalization::impl::sync(self); |
5059 | return self; |
5060 | } |
5061 | } |
5062 | |
5063 | at::Tensor & _grid_sampler_2d_cpu_fallback_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) { |
5064 | if (false) { |
5065 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5066 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
5068 | auto input_meta = to_meta(input); |
5069 | auto grid_meta = to_meta(grid); |
5070 | auto out_meta = to_meta(out); |
5071 | at::AutoDispatchSkipFunctionalize func_guard; |
5072 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5073 | at::_ops::_grid_sampler_2d_cpu_fallback_out::call(input_meta, grid_meta, interpolation_mode, padding_mode, align_corners, out_meta); |
5074 | } |
5075 | |
5076 | at::Tensor input_; |
5077 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
5078 | at::functionalization::impl::sync(input); |
5079 | input_ = at::functionalization::impl::from_functional_tensor(input); |
5080 | } else { |
5081 | input_ = input; |
5082 | } |
5083 | |
5084 | at::Tensor grid_; |
5085 | if (at::functionalization::impl::isFunctionalTensor(grid)) { |
5086 | at::functionalization::impl::sync(grid); |
5087 | grid_ = at::functionalization::impl::from_functional_tensor(grid); |
5088 | } else { |
5089 | grid_ = grid; |
5090 | } |
5091 | |
5092 | at::Tensor out_; |
5093 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
5094 | at::functionalization::impl::sync(out); |
5095 | out_ = at::functionalization::impl::from_functional_tensor(out); |
5096 | } else { |
5097 | out_ = out; |
5098 | } |
5099 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
5100 | if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(grid))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
5105 | } else { |
5106 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5107 | at::AutoDispatchSkipFunctionalize guard; |
      at::_ops::_grid_sampler_2d_cpu_fallback_out::call(input_, grid_, interpolation_mode, padding_mode, align_corners, out_);
      return out;
5110 | } |
5111 | } else { |
5112 | at::Tensor tmp_output; |
5113 | { |
5114 | at::AutoDispatchSkipFunctionalize guard; |
5115 | tmp_output = at::_ops::_grid_sampler_2d_cpu_fallback::call(input_, grid_, interpolation_mode, padding_mode, align_corners); |
5116 | } |
5117 | at::functionalization::impl::replace_(out, tmp_output); |
5118 | at::functionalization::impl::commit_update(out); |
5119 | at::functionalization::impl::sync(out); |
5120 | return out; |
5121 | } |
5122 | } |
5123 | |
5124 | ::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_3d_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) { |
5125 | if (false) { |
5126 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5127 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
5129 | auto grad_output_meta = to_meta(grad_output); |
5130 | auto input_meta = to_meta(input); |
5131 | auto grid_meta = to_meta(grid); |
5132 | auto out0_meta = to_meta(out0); |
5133 | auto out1_meta = to_meta(out1); |
5134 | at::AutoDispatchSkipFunctionalize func_guard; |
5135 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5136 | at::_ops::grid_sampler_3d_backward_out::call(grad_output_meta, input_meta, grid_meta, interpolation_mode, padding_mode, align_corners, output_mask, out0_meta, out1_meta); |
5137 | } |
5138 | |
5139 | at::Tensor grad_output_; |
5140 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
5141 | at::functionalization::impl::sync(grad_output); |
5142 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
5143 | } else { |
5144 | grad_output_ = grad_output; |
5145 | } |
5146 | |
5147 | at::Tensor input_; |
5148 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
5149 | at::functionalization::impl::sync(input); |
5150 | input_ = at::functionalization::impl::from_functional_tensor(input); |
5151 | } else { |
5152 | input_ = input; |
5153 | } |
5154 | |
5155 | at::Tensor grid_; |
5156 | if (at::functionalization::impl::isFunctionalTensor(grid)) { |
5157 | at::functionalization::impl::sync(grid); |
5158 | grid_ = at::functionalization::impl::from_functional_tensor(grid); |
5159 | } else { |
5160 | grid_ = grid; |
5161 | } |
5162 | |
5163 | at::Tensor out0_; |
5164 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
5165 | at::functionalization::impl::sync(out0); |
5166 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
5167 | } else { |
5168 | out0_ = out0; |
5169 | } |
5170 | |
5171 | at::Tensor out1_; |
5172 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
5173 | at::functionalization::impl::sync(out1); |
5174 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
5175 | } else { |
5176 | out1_ = out1; |
5177 | } |
5178 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) { |
5179 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(grid))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
5184 | } else { |
5185 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5186 | at::AutoDispatchSkipFunctionalize guard; |
      at::_ops::grid_sampler_3d_backward_out::call(grad_output_, input_, grid_, interpolation_mode, padding_mode, align_corners, output_mask, out0_, out1_);
      return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
5189 | } |
5190 | } else { |
5191 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
5192 | { |
5193 | at::AutoDispatchSkipFunctionalize guard; |
5194 | tmp_output = at::_ops::grid_sampler_3d_backward::call(grad_output_, input_, grid_, interpolation_mode, padding_mode, align_corners, output_mask); |
5195 | } |
5196 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
5197 | at::functionalization::impl::commit_update(out0); |
5198 | at::functionalization::impl::sync(out0); |
5199 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
5200 | at::functionalization::impl::commit_update(out1); |
5201 | at::functionalization::impl::sync(out1); |
5202 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
5203 | } |
5204 | } |
5205 | |
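// Note [Multi-output write-back]
// Wrappers with several out tensors (grid_sampler_3d_backward_out above, and
// _ctc_loss.Tensor_out earlier) guard on *all* outputs being functional, then
// unpack the functional variant's tuple and commit each element separately:
//
//   at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
//   at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
//
// If any output is non-functional the wrapper falls back instead: functional
// *inputs* trip the case-1 assert, otherwise everything is unwrapped and the call
// is redispatched to the original out= op.
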
5206 | at::Tensor & _fft_c2c_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward, at::Tensor & out) { |
5207 | if (false) { |
5208 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5209 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
5211 | auto self_meta = to_meta(self); |
5212 | auto out_meta = to_meta(out); |
5213 | at::AutoDispatchSkipFunctionalize func_guard; |
5214 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5215 | at::_ops::_fft_c2c_out::call(self_meta, dim, normalization, forward, out_meta); |
5216 | } |
5217 | |
5218 | at::Tensor self_; |
5219 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5220 | at::functionalization::impl::sync(self); |
5221 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5222 | } else { |
5223 | self_ = self; |
5224 | } |
5225 | |
5226 | at::Tensor out_; |
5227 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
5228 | at::functionalization::impl::sync(out); |
5229 | out_ = at::functionalization::impl::from_functional_tensor(out); |
5230 | } else { |
5231 | out_ = out; |
5232 | } |
5233 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
5234 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
5239 | } else { |
5240 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5241 | at::AutoDispatchSkipFunctionalize guard; |
      at::_ops::_fft_c2c_out::call(self_, dim, normalization, forward, out_);
      return out;
5244 | } |
5245 | } else { |
5246 | at::Tensor tmp_output; |
5247 | { |
5248 | at::AutoDispatchSkipFunctionalize guard; |
5249 | tmp_output = at::_ops::_fft_c2c::call(self_, dim, normalization, forward); |
5250 | } |
5251 | at::functionalization::impl::replace_(out, tmp_output); |
5252 | at::functionalization::impl::commit_update(out); |
5253 | at::functionalization::impl::sync(out); |
5254 | return out; |
5255 | } |
5256 | } |
5257 | |
5258 | at::Tensor & index_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, at::Tensor & out) { |
5259 | if (false) { |
5260 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5261 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
5263 | auto self_meta = to_meta(self); |
5264 | auto index_meta = to_meta(index); |
5265 | auto source_meta = to_meta(source); |
5266 | auto out_meta = to_meta(out); |
5267 | at::AutoDispatchSkipFunctionalize func_guard; |
5268 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5269 | at::_ops::index_copy_out::call(self_meta, dim, index_meta, source_meta, out_meta); |
5270 | } |
5271 | |
5272 | at::Tensor self_; |
5273 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5274 | at::functionalization::impl::sync(self); |
5275 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5276 | } else { |
5277 | self_ = self; |
5278 | } |
5279 | |
5280 | at::Tensor index_; |
5281 | if (at::functionalization::impl::isFunctionalTensor(index)) { |
5282 | at::functionalization::impl::sync(index); |
5283 | index_ = at::functionalization::impl::from_functional_tensor(index); |
5284 | } else { |
5285 | index_ = index; |
5286 | } |
5287 | |
5288 | at::Tensor source_; |
5289 | if (at::functionalization::impl::isFunctionalTensor(source)) { |
5290 | at::functionalization::impl::sync(source); |
5291 | source_ = at::functionalization::impl::from_functional_tensor(source); |
5292 | } else { |
5293 | source_ = source; |
5294 | } |
5295 | |
5296 | at::Tensor out_; |
5297 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
5298 | at::functionalization::impl::sync(out); |
5299 | out_ = at::functionalization::impl::from_functional_tensor(out); |
5300 | } else { |
5301 | out_ = out; |
5302 | } |
5303 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
5304 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(source))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
5309 | } else { |
5310 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5311 | at::AutoDispatchSkipFunctionalize guard; |
      at::_ops::index_copy_out::call(self_, dim, index_, source_, out_);
      return out;
5314 | } |
5315 | } else { |
5316 | at::Tensor tmp_output; |
5317 | { |
5318 | at::AutoDispatchSkipFunctionalize guard; |
5319 | tmp_output = at::_ops::index_copy::call(self_, dim, index_, source_); |
5320 | } |
5321 | at::functionalization::impl::replace_(out, tmp_output); |
5322 | at::functionalization::impl::commit_update(out); |
5323 | at::functionalization::impl::sync(out); |
5324 | return out; |
5325 | } |
5326 | } |
5327 | |
5328 | at::Tensor & index_copy_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) { |
5329 | if (true) { |
5330 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5331 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
5332 | // (We can only do this for inplace ops today though, because they technicaly all support meta tensors). |
5333 | auto self_meta = to_meta(self); |
5334 | auto index_meta = to_meta(index); |
5335 | auto source_meta = to_meta(source); |
5336 | at::AutoDispatchSkipFunctionalize func_guard; |
5337 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5338 | at::_ops::index_copy_::call(self_meta, dim, index_meta, source_meta); |
5339 | } |
5340 | |
5341 | at::Tensor self_; |
5342 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5343 | at::functionalization::impl::sync(self); |
5344 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5345 | } else { |
5346 | self_ = self; |
5347 | } |
5348 | |
5349 | at::Tensor index_; |
5350 | if (at::functionalization::impl::isFunctionalTensor(index)) { |
5351 | at::functionalization::impl::sync(index); |
5352 | index_ = at::functionalization::impl::from_functional_tensor(index); |
5353 | } else { |
5354 | index_ = index; |
5355 | } |
5356 | |
5357 | at::Tensor source_; |
5358 | if (at::functionalization::impl::isFunctionalTensor(source)) { |
5359 | at::functionalization::impl::sync(source); |
5360 | source_ = at::functionalization::impl::from_functional_tensor(source); |
5361 | } else { |
5362 | source_ = source; |
5363 | } |
5364 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
5365 | if ((false || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(source))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
5370 | } else { |
5371 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5372 | at::AutoDispatchSkipFunctionalize guard; |
      at::_ops::index_copy_::call(self_, dim, index_, source_);
      return self;
5375 | } |
5376 | } else { |
5377 | at::Tensor tmp_output; |
5378 | { |
5379 | at::AutoDispatchSkipFunctionalize guard; |
5380 | tmp_output = at::_ops::index_copy::call(self_, dim, index_, source_); |
5381 | } |
5382 | at::functionalization::impl::replace_(self, tmp_output); |
5383 | at::functionalization::impl::commit_update(self); |
5384 | at::functionalization::impl::sync(self); |
5385 | return self; |
5386 | } |
5387 | } |
5388 | |
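// Note [The case-1 guard on inplace ops with tensor arguments]
// For an inplace op like index_copy_ the mutated tensor is self, so case 1 fires
// when self is non-functional while index or source is functional: committing a
// functional value into raw storage would silently detach it from the functional
// graph. A sketch of a call that would hit the assert (illustrative only):
//
//   at::Tensor self = at::zeros({3});                        // plain tensor
//   at::Tensor src  = at::functionalization::impl::to_functional_tensor(at::ones({1}));
//   at::Tensor idx  = at::zeros({1}, at::kLong);
//   // self.index_copy_(0, idx, src) routed through index_copy_ above would hit
//   // the TORCH_INTERNAL_ASSERT in case 1.
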
5389 | at::Tensor & isin_out_Tensor_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) { |
5390 | if (false) { |
5391 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5392 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
5394 | auto elements_meta = to_meta(elements); |
5395 | auto test_elements_meta = to_meta(test_elements); |
5396 | auto out_meta = to_meta(out); |
5397 | at::AutoDispatchSkipFunctionalize func_guard; |
5398 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5399 | at::_ops::isin_Tensor_Tensor_out::call(elements_meta, test_elements_meta, assume_unique, invert, out_meta); |
5400 | } |
5401 | |
5402 | at::Tensor elements_; |
5403 | if (at::functionalization::impl::isFunctionalTensor(elements)) { |
5404 | at::functionalization::impl::sync(elements); |
5405 | elements_ = at::functionalization::impl::from_functional_tensor(elements); |
5406 | } else { |
5407 | elements_ = elements; |
5408 | } |
5409 | |
5410 | at::Tensor test_elements_; |
5411 | if (at::functionalization::impl::isFunctionalTensor(test_elements)) { |
5412 | at::functionalization::impl::sync(test_elements); |
5413 | test_elements_ = at::functionalization::impl::from_functional_tensor(test_elements); |
5414 | } else { |
5415 | test_elements_ = test_elements; |
5416 | } |
5417 | |
5418 | at::Tensor out_; |
5419 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
5420 | at::functionalization::impl::sync(out); |
5421 | out_ = at::functionalization::impl::from_functional_tensor(out); |
5422 | } else { |
5423 | out_ = out; |
5424 | } |
5425 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
5426 | if ((false || at::functionalization::impl::isFunctionalTensor(elements) || at::functionalization::impl::isFunctionalTensor(test_elements))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
5431 | } else { |
5432 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5433 | at::AutoDispatchSkipFunctionalize guard; |
      at::_ops::isin_Tensor_Tensor_out::call(elements_, test_elements_, assume_unique, invert, out_);
      return out;
5436 | } |
5437 | } else { |
5438 | at::Tensor tmp_output; |
5439 | { |
5440 | at::AutoDispatchSkipFunctionalize guard; |
5441 | tmp_output = at::_ops::isin_Tensor_Tensor::call(elements_, test_elements_, assume_unique, invert); |
5442 | } |
5443 | at::functionalization::impl::replace_(out, tmp_output); |
5444 | at::functionalization::impl::commit_update(out); |
5445 | at::functionalization::impl::sync(out); |
5446 | return out; |
5447 | } |
5448 | } |
5449 | |
5450 | at::Tensor & isin_out_Tensor_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, at::Tensor & out) { |
5451 | if (false) { |
5452 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5453 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
5455 | auto elements_meta = to_meta(elements); |
5456 | auto out_meta = to_meta(out); |
5457 | at::AutoDispatchSkipFunctionalize func_guard; |
5458 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5459 | at::_ops::isin_Tensor_Scalar_out::call(elements_meta, test_element, assume_unique, invert, out_meta); |
5460 | } |
5461 | |
5462 | at::Tensor elements_; |
5463 | if (at::functionalization::impl::isFunctionalTensor(elements)) { |
5464 | at::functionalization::impl::sync(elements); |
5465 | elements_ = at::functionalization::impl::from_functional_tensor(elements); |
5466 | } else { |
5467 | elements_ = elements; |
5468 | } |
5469 | |
5470 | at::Tensor out_; |
5471 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
5472 | at::functionalization::impl::sync(out); |
5473 | out_ = at::functionalization::impl::from_functional_tensor(out); |
5474 | } else { |
5475 | out_ = out; |
5476 | } |
5477 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
5478 | if ((false || at::functionalization::impl::isFunctionalTensor(elements))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
5483 | } else { |
5484 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5485 | at::AutoDispatchSkipFunctionalize guard; |
      at::_ops::isin_Tensor_Scalar_out::call(elements_, test_element, assume_unique, invert, out_);
      return out;
5488 | } |
5489 | } else { |
5490 | at::Tensor tmp_output; |
5491 | { |
5492 | at::AutoDispatchSkipFunctionalize guard; |
5493 | tmp_output = at::_ops::isin_Tensor_Scalar::call(elements_, test_element, assume_unique, invert); |
5494 | } |
5495 | at::functionalization::impl::replace_(out, tmp_output); |
5496 | at::functionalization::impl::commit_update(out); |
5497 | at::functionalization::impl::sync(out); |
5498 | return out; |
5499 | } |
5500 | } |
5501 | |
5502 | at::Tensor & isin_out_Scalar_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) { |
5503 | if (false) { |
5504 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5505 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
5507 | auto test_elements_meta = to_meta(test_elements); |
5508 | auto out_meta = to_meta(out); |
5509 | at::AutoDispatchSkipFunctionalize func_guard; |
5510 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5511 | at::_ops::isin_Scalar_Tensor_out::call(element, test_elements_meta, assume_unique, invert, out_meta); |
5512 | } |
5513 | |
5514 | at::Tensor test_elements_; |
5515 | if (at::functionalization::impl::isFunctionalTensor(test_elements)) { |
5516 | at::functionalization::impl::sync(test_elements); |
5517 | test_elements_ = at::functionalization::impl::from_functional_tensor(test_elements); |
5518 | } else { |
5519 | test_elements_ = test_elements; |
5520 | } |
5521 | |
5522 | at::Tensor out_; |
5523 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
5524 | at::functionalization::impl::sync(out); |
5525 | out_ = at::functionalization::impl::from_functional_tensor(out); |
5526 | } else { |
5527 | out_ = out; |
5528 | } |
5529 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
5530 | if ((false || at::functionalization::impl::isFunctionalTensor(test_elements))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
5535 | } else { |
5536 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5537 | at::AutoDispatchSkipFunctionalize guard; |
      at::_ops::isin_Scalar_Tensor_out::call(element, test_elements_, assume_unique, invert, out_);
      return out;
5540 | } |
5541 | } else { |
5542 | at::Tensor tmp_output; |
5543 | { |
5544 | at::AutoDispatchSkipFunctionalize guard; |
5545 | tmp_output = at::_ops::isin_Scalar_Tensor::call(element, test_elements_, assume_unique, invert); |
5546 | } |
5547 | at::functionalization::impl::replace_(out, tmp_output); |
5548 | at::functionalization::impl::commit_update(out); |
5549 | at::functionalization::impl::sync(out); |
5550 | return out; |
5551 | } |
5552 | } |
5553 | |
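// Note [One wrapper per overload]
// The three isin wrappers above map to the Tensor_Tensor, Tensor_Scalar and
// Scalar_Tensor overloads. Scalar arguments are forwarded untouched; only tensor
// arguments get the sync/unwrap treatment, which is why isin_out_Scalar_Tensor_out
// unwraps test_elements but passes `element` through as-is. Hedged usage sketch of
// that overload via the public API (illustrative only):
//
//   at::Tensor test_elements = at::arange(10);
//   at::Tensor hits = at::empty({0}, test_elements.options().dtype(at::kBool));
//   at::isin_out(hits, /*element=*/3, test_elements);   // Scalar_Tensor overload
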
5554 | at::Tensor & kron_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
5555 | if (false) { |
5556 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5557 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
5559 | auto self_meta = to_meta(self); |
5560 | auto other_meta = to_meta(other); |
5561 | auto out_meta = to_meta(out); |
5562 | at::AutoDispatchSkipFunctionalize func_guard; |
5563 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5564 | at::_ops::kron_out::call(self_meta, other_meta, out_meta); |
5565 | } |
5566 | |
5567 | at::Tensor self_; |
5568 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5569 | at::functionalization::impl::sync(self); |
5570 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5571 | } else { |
5572 | self_ = self; |
5573 | } |
5574 | |
5575 | at::Tensor other_; |
5576 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
5577 | at::functionalization::impl::sync(other); |
5578 | other_ = at::functionalization::impl::from_functional_tensor(other); |
5579 | } else { |
5580 | other_ = other; |
5581 | } |
5582 | |
5583 | at::Tensor out_; |
5584 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
5585 | at::functionalization::impl::sync(out); |
5586 | out_ = at::functionalization::impl::from_functional_tensor(out); |
5587 | } else { |
5588 | out_ = out; |
5589 | } |
5590 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
5591 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
5596 | } else { |
5597 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5598 | at::AutoDispatchSkipFunctionalize guard; |
      at::_ops::kron_out::call(self_, other_, out_);
      return out;
5601 | } |
5602 | } else { |
5603 | at::Tensor tmp_output; |
5604 | { |
5605 | at::AutoDispatchSkipFunctionalize guard; |
5606 | tmp_output = at::_ops::kron::call(self_, other_); |
5607 | } |
5608 | at::functionalization::impl::replace_(out, tmp_output); |
5609 | at::functionalization::impl::commit_update(out); |
5610 | at::functionalization::impl::sync(out); |
5611 | return out; |
5612 | } |
5613 | } |
5614 | |
5615 | at::Tensor & nan_to_num_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf, at::Tensor & out) { |
5616 | if (false) { |
5617 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
// This helps us catch shape errors that the mutable op would raise but its functional variant would not.
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
5620 | auto self_meta = to_meta(self); |
5621 | auto out_meta = to_meta(out); |
5622 | at::AutoDispatchSkipFunctionalize func_guard; |
5623 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5624 | at::_ops::nan_to_num_out::call(self_meta, nan, posinf, neginf, out_meta); |
5625 | } |
5626 | |
5627 | at::Tensor self_; |
5628 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5629 | at::functionalization::impl::sync(self); |
5630 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5631 | } else { |
5632 | self_ = self; |
5633 | } |
5634 | |
5635 | at::Tensor out_; |
5636 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
5637 | at::functionalization::impl::sync(out); |
5638 | out_ = at::functionalization::impl::from_functional_tensor(out); |
5639 | } else { |
5640 | out_ = out; |
5641 | } |
5642 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
5643 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
} else {
// case 2: arguments are not functional tensors, so we no-op and redispatch.
at::AutoDispatchSkipFunctionalize guard;
at::Tensor tmp_output = at::_ops::nan_to_num_out::call(self_, nan, posinf, neginf, out_);
return out;
5653 | } |
5654 | } else { |
5655 | at::Tensor tmp_output; |
5656 | { |
5657 | at::AutoDispatchSkipFunctionalize guard; |
5658 | tmp_output = at::_ops::nan_to_num::call(self_, nan, posinf, neginf); |
5659 | } |
5660 | at::functionalization::impl::replace_(out, tmp_output); |
5661 | at::functionalization::impl::commit_update(out); |
5662 | at::functionalization::impl::sync(out); |
5663 | return out; |
5664 | } |
5665 | } |
5666 | |
5667 | at::Tensor & nan_to_num_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) { |
5668 | if (true) { |
5669 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
// This helps us catch shape errors that the mutable op would raise but its functional variant would not.
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
5672 | auto self_meta = to_meta(self); |
5673 | at::AutoDispatchSkipFunctionalize func_guard; |
5674 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5675 | at::_ops::nan_to_num_::call(self_meta, nan, posinf, neginf); |
5676 | } |
5677 | |
5678 | at::Tensor self_; |
5679 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5680 | at::functionalization::impl::sync(self); |
5681 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5682 | } else { |
5683 | self_ = self; |
5684 | } |
5685 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
5686 | if ((false)) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
} else {
// case 2: arguments are not functional tensors, so we no-op and redispatch.
at::AutoDispatchSkipFunctionalize guard;
at::Tensor tmp_output = at::_ops::nan_to_num_::call(self_, nan, posinf, neginf);
return self;
5696 | } |
5697 | } else { |
5698 | at::Tensor tmp_output; |
5699 | { |
5700 | at::AutoDispatchSkipFunctionalize guard; |
5701 | tmp_output = at::_ops::nan_to_num::call(self_, nan, posinf, neginf); |
5702 | } |
5703 | at::functionalization::impl::replace_(self, tmp_output); |
5704 | at::functionalization::impl::commit_update(self); |
5705 | at::functionalization::impl::sync(self); |
5706 | return self; |
5707 | } |
5708 | } |
5709 | |
5710 | at::Tensor & linear_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::Tensor & out) { |
5711 | if (false) { |
5712 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
// This helps us catch shape errors that the mutable op would raise but its functional variant would not.
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
5715 | auto input_meta = to_meta(input); |
5716 | auto weight_meta = to_meta(weight); |
5717 | auto bias_meta = to_meta(bias); |
5718 | auto out_meta = to_meta(out); |
5719 | at::AutoDispatchSkipFunctionalize func_guard; |
5720 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5721 | at::_ops::linear_out::call(input_meta, weight_meta, bias_meta, out_meta); |
5722 | } |
5723 | |
5724 | at::Tensor input_; |
5725 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
5726 | at::functionalization::impl::sync(input); |
5727 | input_ = at::functionalization::impl::from_functional_tensor(input); |
5728 | } else { |
5729 | input_ = input; |
5730 | } |
5731 | |
5732 | at::Tensor weight_; |
5733 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
5734 | at::functionalization::impl::sync(weight); |
5735 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
5736 | } else { |
5737 | weight_ = weight; |
5738 | } |
5739 | |
5740 | c10::optional<at::Tensor> bias_; |
5741 | if (at::functionalization::impl::isFunctionalTensor(bias)) { |
5742 | at::functionalization::impl::sync(bias); |
5743 | bias_ = at::functionalization::impl::from_functional_tensor(bias); |
5744 | } else { |
5745 | bias_ = bias; |
5746 | } |
5747 | |
5748 | at::Tensor out_; |
5749 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
5750 | at::functionalization::impl::sync(out); |
5751 | out_ = at::functionalization::impl::from_functional_tensor(out); |
5752 | } else { |
5753 | out_ = out; |
5754 | } |
5755 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
5756 | if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
} else {
// case 2: arguments are not functional tensors, so we no-op and redispatch.
at::AutoDispatchSkipFunctionalize guard;
at::Tensor tmp_output = at::_ops::linear_out::call(input_, weight_, bias_, out_);
return out;
5766 | } |
5767 | } else { |
5768 | at::Tensor tmp_output; |
5769 | { |
5770 | at::AutoDispatchSkipFunctionalize guard; |
5771 | tmp_output = at::_ops::linear::call(input_, weight_, bias_); |
5772 | } |
5773 | at::functionalization::impl::replace_(out, tmp_output); |
5774 | at::functionalization::impl::commit_update(out); |
5775 | at::functionalization::impl::sync(out); |
5776 | return out; |
5777 | } |
5778 | } |
5779 | |
5780 | at::Tensor & mkldnn_linear_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::Tensor & out) { |
5781 | if (false) { |
5782 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
// This helps us catch shape errors that the mutable op would raise but its functional variant would not.
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
5785 | auto self_meta = to_meta(self); |
5786 | auto weight_meta = to_meta(weight); |
5787 | auto bias_meta = to_meta(bias); |
5788 | auto out_meta = to_meta(out); |
5789 | at::AutoDispatchSkipFunctionalize func_guard; |
5790 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5791 | at::_ops::mkldnn_linear_out::call(self_meta, weight_meta, bias_meta, out_meta); |
5792 | } |
5793 | |
5794 | at::Tensor self_; |
5795 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5796 | at::functionalization::impl::sync(self); |
5797 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5798 | } else { |
5799 | self_ = self; |
5800 | } |
5801 | |
5802 | at::Tensor weight_; |
5803 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
5804 | at::functionalization::impl::sync(weight); |
5805 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
5806 | } else { |
5807 | weight_ = weight; |
5808 | } |
5809 | |
5810 | c10::optional<at::Tensor> bias_; |
5811 | if (at::functionalization::impl::isFunctionalTensor(bias)) { |
5812 | at::functionalization::impl::sync(bias); |
5813 | bias_ = at::functionalization::impl::from_functional_tensor(bias); |
5814 | } else { |
5815 | bias_ = bias; |
5816 | } |
5817 | |
5818 | at::Tensor out_; |
5819 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
5820 | at::functionalization::impl::sync(out); |
5821 | out_ = at::functionalization::impl::from_functional_tensor(out); |
5822 | } else { |
5823 | out_ = out; |
5824 | } |
5825 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
5826 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
} else {
// case 2: arguments are not functional tensors, so we no-op and redispatch.
at::AutoDispatchSkipFunctionalize guard;
at::Tensor tmp_output = at::_ops::mkldnn_linear_out::call(self_, weight_, bias_, out_);
return out;
5836 | } |
5837 | } else { |
5838 | at::Tensor tmp_output; |
5839 | { |
5840 | at::AutoDispatchSkipFunctionalize guard; |
5841 | tmp_output = at::_ops::mkldnn_linear::call(self_, weight_, bias_); |
5842 | } |
5843 | at::functionalization::impl::replace_(out, tmp_output); |
5844 | at::functionalization::impl::commit_update(out); |
5845 | at::functionalization::impl::sync(out); |
5846 | return out; |
5847 | } |
5848 | } |
5849 | |
5850 | at::Tensor & linspace_out_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, at::Tensor & out) { |
5851 | if (false) { |
5852 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
// This helps us catch shape errors that the mutable op would raise but its functional variant would not.
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
5855 | auto out_meta = to_meta(out); |
5856 | at::AutoDispatchSkipFunctionalize func_guard; |
5857 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5858 | at::_ops::linspace_out::call(start, end, steps, out_meta); |
5859 | } |
5860 | |
5861 | at::Tensor out_; |
5862 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
5863 | at::functionalization::impl::sync(out); |
5864 | out_ = at::functionalization::impl::from_functional_tensor(out); |
5865 | } else { |
5866 | out_ = out; |
5867 | } |
5868 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
5869 | if ((false)) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
} else {
// case 2: arguments are not functional tensors, so we no-op and redispatch.
at::AutoDispatchSkipFunctionalize guard;
at::Tensor tmp_output = at::_ops::linspace_out::call(start, end, steps, out_);
return out;
5879 | } |
5880 | } else { |
5881 | at::Tensor tmp_output; |
5882 | { |
5883 | at::AutoDispatchSkipFunctionalize guard; |
5884 | tmp_output = at::_ops::linspace::call(start, end, steps, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
5885 | } |
5886 | at::functionalization::impl::replace_(out, tmp_output); |
5887 | at::functionalization::impl::commit_update(out); |
5888 | at::functionalization::impl::sync(out); |
5889 | return out; |
5890 | } |
5891 | } |
5892 | |
5893 | at::Tensor & log_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
5894 | if (false) { |
5895 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
// This helps us catch shape errors that the mutable op would raise but its functional variant would not.
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
5898 | auto self_meta = to_meta(self); |
5899 | auto out_meta = to_meta(out); |
5900 | at::AutoDispatchSkipFunctionalize func_guard; |
5901 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5902 | at::_ops::log_out::call(self_meta, out_meta); |
5903 | } |
5904 | |
5905 | at::Tensor self_; |
5906 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5907 | at::functionalization::impl::sync(self); |
5908 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5909 | } else { |
5910 | self_ = self; |
5911 | } |
5912 | |
5913 | at::Tensor out_; |
5914 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
5915 | at::functionalization::impl::sync(out); |
5916 | out_ = at::functionalization::impl::from_functional_tensor(out); |
5917 | } else { |
5918 | out_ = out; |
5919 | } |
5920 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
5921 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
} else {
// case 2: arguments are not functional tensors, so we no-op and redispatch.
at::AutoDispatchSkipFunctionalize guard;
at::Tensor tmp_output = at::_ops::log_out::call(self_, out_);
return out;
5931 | } |
5932 | } else { |
5933 | at::Tensor tmp_output; |
5934 | { |
5935 | at::AutoDispatchSkipFunctionalize guard; |
5936 | tmp_output = at::_ops::log::call(self_); |
5937 | } |
5938 | at::functionalization::impl::replace_(out, tmp_output); |
5939 | at::functionalization::impl::commit_update(out); |
5940 | at::functionalization::impl::sync(out); |
5941 | return out; |
5942 | } |
5943 | } |
5944 | |
5945 | at::Tensor & log_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
5946 | if (true) { |
5947 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
// This helps us catch shape errors that the mutable op would raise but its functional variant would not.
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
5950 | auto self_meta = to_meta(self); |
5951 | at::AutoDispatchSkipFunctionalize func_guard; |
5952 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5953 | at::_ops::log_::call(self_meta); |
5954 | } |
5955 | |
5956 | at::Tensor self_; |
5957 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5958 | at::functionalization::impl::sync(self); |
5959 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5960 | } else { |
5961 | self_ = self; |
5962 | } |
5963 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
5964 | if ((false)) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
} else {
// case 2: arguments are not functional tensors, so we no-op and redispatch.
at::AutoDispatchSkipFunctionalize guard;
at::Tensor tmp_output = at::_ops::log_::call(self_);
return self;
5974 | } |
5975 | } else { |
5976 | at::Tensor tmp_output; |
5977 | { |
5978 | at::AutoDispatchSkipFunctionalize guard; |
5979 | tmp_output = at::_ops::log::call(self_); |
5980 | } |
5981 | at::functionalization::impl::replace_(self, tmp_output); |
5982 | at::functionalization::impl::commit_update(self); |
5983 | at::functionalization::impl::sync(self); |
5984 | return self; |
5985 | } |
5986 | } |
5987 | |
5988 | at::Tensor & log_softmax_out_int_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) { |
5989 | if (false) { |
5990 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
// This helps us catch shape errors that the mutable op would raise but its functional variant would not.
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
5993 | auto self_meta = to_meta(self); |
5994 | auto out_meta = to_meta(out); |
5995 | at::AutoDispatchSkipFunctionalize func_guard; |
5996 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5997 | at::_ops::log_softmax_int_out::call(self_meta, dim, dtype, out_meta); |
5998 | } |
5999 | |
6000 | at::Tensor self_; |
6001 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
6002 | at::functionalization::impl::sync(self); |
6003 | self_ = at::functionalization::impl::from_functional_tensor(self); |
6004 | } else { |
6005 | self_ = self; |
6006 | } |
6007 | |
6008 | at::Tensor out_; |
6009 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
6010 | at::functionalization::impl::sync(out); |
6011 | out_ = at::functionalization::impl::from_functional_tensor(out); |
6012 | } else { |
6013 | out_ = out; |
6014 | } |
6015 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
6016 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
} else {
// case 2: arguments are not functional tensors, so we no-op and redispatch.
at::AutoDispatchSkipFunctionalize guard;
at::Tensor tmp_output = at::_ops::log_softmax_int_out::call(self_, dim, dtype, out_);
return out;
6026 | } |
6027 | } else { |
6028 | at::Tensor tmp_output; |
6029 | { |
6030 | at::AutoDispatchSkipFunctionalize guard; |
6031 | tmp_output = at::_ops::log_softmax_int::call(self_, dim, dtype); |
6032 | } |
6033 | at::functionalization::impl::replace_(out, tmp_output); |
6034 | at::functionalization::impl::commit_update(out); |
6035 | at::functionalization::impl::sync(out); |
6036 | return out; |
6037 | } |
6038 | } |
6039 | |
6040 | at::Tensor & _log_softmax_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) { |
6041 | if (false) { |
6042 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
// This helps us catch shape errors that the mutable op would raise but its functional variant would not.
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
6045 | auto self_meta = to_meta(self); |
6046 | auto out_meta = to_meta(out); |
6047 | at::AutoDispatchSkipFunctionalize func_guard; |
6048 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6049 | at::_ops::_log_softmax_out::call(self_meta, dim, half_to_float, out_meta); |
6050 | } |
6051 | |
6052 | at::Tensor self_; |
6053 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
6054 | at::functionalization::impl::sync(self); |
6055 | self_ = at::functionalization::impl::from_functional_tensor(self); |
6056 | } else { |
6057 | self_ = self; |
6058 | } |
6059 | |
6060 | at::Tensor out_; |
6061 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
6062 | at::functionalization::impl::sync(out); |
6063 | out_ = at::functionalization::impl::from_functional_tensor(out); |
6064 | } else { |
6065 | out_ = out; |
6066 | } |
6067 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
6068 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
} else {
// case 2: arguments are not functional tensors, so we no-op and redispatch.
at::AutoDispatchSkipFunctionalize guard;
at::Tensor tmp_output = at::_ops::_log_softmax_out::call(self_, dim, half_to_float, out_);
return out;
6078 | } |
6079 | } else { |
6080 | at::Tensor tmp_output; |
6081 | { |
6082 | at::AutoDispatchSkipFunctionalize guard; |
6083 | tmp_output = at::_ops::_log_softmax::call(self_, dim, half_to_float); |
6084 | } |
6085 | at::functionalization::impl::replace_(out, tmp_output); |
6086 | at::functionalization::impl::commit_update(out); |
6087 | at::functionalization::impl::sync(out); |
6088 | return out; |
6089 | } |
6090 | } |
6091 | |
6092 | at::Tensor & _log_softmax_backward_data_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & out) { |
6093 | if (false) { |
6094 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
// This helps us catch shape errors that the mutable op would raise but its functional variant would not.
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
6097 | auto grad_output_meta = to_meta(grad_output); |
6098 | auto output_meta = to_meta(output); |
6099 | auto out_meta = to_meta(out); |
6100 | at::AutoDispatchSkipFunctionalize func_guard; |
6101 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6102 | at::_ops::_log_softmax_backward_data_out::call(grad_output_meta, output_meta, dim, input_dtype, out_meta); |
6103 | } |
6104 | |
6105 | at::Tensor grad_output_; |
6106 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
6107 | at::functionalization::impl::sync(grad_output); |
6108 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
6109 | } else { |
6110 | grad_output_ = grad_output; |
6111 | } |
6112 | |
6113 | at::Tensor output_; |
6114 | if (at::functionalization::impl::isFunctionalTensor(output)) { |
6115 | at::functionalization::impl::sync(output); |
6116 | output_ = at::functionalization::impl::from_functional_tensor(output); |
6117 | } else { |
6118 | output_ = output; |
6119 | } |
6120 | |
6121 | at::Tensor out_; |
6122 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
6123 | at::functionalization::impl::sync(out); |
6124 | out_ = at::functionalization::impl::from_functional_tensor(out); |
6125 | } else { |
6126 | out_ = out; |
6127 | } |
6128 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
6129 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(output))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
} else {
// case 2: arguments are not functional tensors, so we no-op and redispatch.
at::AutoDispatchSkipFunctionalize guard;
at::Tensor tmp_output = at::_ops::_log_softmax_backward_data_out::call(grad_output_, output_, dim, input_dtype, out_);
return out;
6139 | } |
6140 | } else { |
6141 | at::Tensor tmp_output; |
6142 | { |
6143 | at::AutoDispatchSkipFunctionalize guard; |
6144 | tmp_output = at::_ops::_log_softmax_backward_data::call(grad_output_, output_, dim, input_dtype); |
6145 | } |
6146 | at::functionalization::impl::replace_(out, tmp_output); |
6147 | at::functionalization::impl::commit_update(out); |
6148 | at::functionalization::impl::sync(out); |
6149 | return out; |
6150 | } |
6151 | } |
6152 | |
6153 | ::std::tuple<at::Tensor &,at::Tensor &> max_out_dim_max(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) { |
6154 | if (false) { |
6155 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
// This helps us catch shape errors that the mutable op would raise but its functional variant would not.
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
6158 | auto self_meta = to_meta(self); |
6159 | auto max_meta = to_meta(max); |
6160 | auto max_values_meta = to_meta(max_values); |
6161 | at::AutoDispatchSkipFunctionalize func_guard; |
6162 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6163 | at::_ops::max_dim_max::call(self_meta, dim, keepdim, max_meta, max_values_meta); |
6164 | } |
6165 | |
6166 | at::Tensor self_; |
6167 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
6168 | at::functionalization::impl::sync(self); |
6169 | self_ = at::functionalization::impl::from_functional_tensor(self); |
6170 | } else { |
6171 | self_ = self; |
6172 | } |
6173 | |
6174 | at::Tensor max_; |
6175 | if (at::functionalization::impl::isFunctionalTensor(max)) { |
6176 | at::functionalization::impl::sync(max); |
6177 | max_ = at::functionalization::impl::from_functional_tensor(max); |
6178 | } else { |
6179 | max_ = max; |
6180 | } |
6181 | |
6182 | at::Tensor max_values_; |
6183 | if (at::functionalization::impl::isFunctionalTensor(max_values)) { |
6184 | at::functionalization::impl::sync(max_values); |
6185 | max_values_ = at::functionalization::impl::from_functional_tensor(max_values); |
6186 | } else { |
6187 | max_values_ = max_values; |
6188 | } |
6189 | if (!(true && at::functionalization::impl::isFunctionalTensor(max) && at::functionalization::impl::isFunctionalTensor(max_values))) { |
6190 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
} else {
// case 2: arguments are not functional tensors, so we no-op and redispatch.
at::AutoDispatchSkipFunctionalize guard;
::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::max_dim_max::call(self_, dim, keepdim, max_, max_values_);
return ::std::tuple<at::Tensor &,at::Tensor &>(max, max_values);
6200 | } |
6201 | } else { |
6202 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
6203 | { |
6204 | at::AutoDispatchSkipFunctionalize guard; |
6205 | tmp_output = at::_ops::max_dim::call(self_, dim, keepdim); |
6206 | } |
6207 | at::functionalization::impl::replace_(max, std::get<0>(tmp_output)); |
6208 | at::functionalization::impl::commit_update(max); |
6209 | at::functionalization::impl::sync(max); |
6210 | at::functionalization::impl::replace_(max_values, std::get<1>(tmp_output)); |
6211 | at::functionalization::impl::commit_update(max_values); |
6212 | at::functionalization::impl::sync(max_values); |
6213 | return ::std::tuple<at::Tensor &,at::Tensor &>(max, max_values); |
6214 | } |
6215 | } |
6216 | |
6217 | ::std::tuple<at::Tensor &,at::Tensor &> max_out_names_dim_max(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) { |
6218 | if (false) { |
6219 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
// This helps us catch shape errors that the mutable op would raise but its functional variant would not.
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
6222 | auto self_meta = to_meta(self); |
6223 | auto max_meta = to_meta(max); |
6224 | auto max_values_meta = to_meta(max_values); |
6225 | at::AutoDispatchSkipFunctionalize func_guard; |
6226 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6227 | at::_ops::max_names_dim_max::call(self_meta, dim, keepdim, max_meta, max_values_meta); |
6228 | } |
6229 | |
6230 | at::Tensor self_; |
6231 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
6232 | at::functionalization::impl::sync(self); |
6233 | self_ = at::functionalization::impl::from_functional_tensor(self); |
6234 | } else { |
6235 | self_ = self; |
6236 | } |
6237 | |
6238 | at::Tensor max_; |
6239 | if (at::functionalization::impl::isFunctionalTensor(max)) { |
6240 | at::functionalization::impl::sync(max); |
6241 | max_ = at::functionalization::impl::from_functional_tensor(max); |
6242 | } else { |
6243 | max_ = max; |
6244 | } |
6245 | |
6246 | at::Tensor max_values_; |
6247 | if (at::functionalization::impl::isFunctionalTensor(max_values)) { |
6248 | at::functionalization::impl::sync(max_values); |
6249 | max_values_ = at::functionalization::impl::from_functional_tensor(max_values); |
6250 | } else { |
6251 | max_values_ = max_values; |
6252 | } |
6253 | if (!(true && at::functionalization::impl::isFunctionalTensor(max) && at::functionalization::impl::isFunctionalTensor(max_values))) { |
6254 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
} else {
// case 2: arguments are not functional tensors, so we no-op and redispatch.
at::AutoDispatchSkipFunctionalize guard;
::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::max_names_dim_max::call(self_, dim, keepdim, max_, max_values_);
return ::std::tuple<at::Tensor &,at::Tensor &>(max, max_values);
6264 | } |
6265 | } else { |
6266 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
6267 | { |
6268 | at::AutoDispatchSkipFunctionalize guard; |
6269 | tmp_output = at::_ops::max_names_dim::call(self_, dim, keepdim); |
6270 | } |
6271 | at::functionalization::impl::replace_(max, std::get<0>(tmp_output)); |
6272 | at::functionalization::impl::commit_update(max); |
6273 | at::functionalization::impl::sync(max); |
6274 | at::functionalization::impl::replace_(max_values, std::get<1>(tmp_output)); |
6275 | at::functionalization::impl::commit_update(max_values); |
6276 | at::functionalization::impl::sync(max_values); |
6277 | return ::std::tuple<at::Tensor &,at::Tensor &>(max, max_values); |
6278 | } |
6279 | } |
6280 | |
6281 | at::Tensor & amax_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) { |
6282 | if (false) { |
6283 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
// This helps us catch shape errors that the mutable op would raise but its functional variant would not.
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
6286 | auto self_meta = to_meta(self); |
6287 | auto out_meta = to_meta(out); |
6288 | at::AutoDispatchSkipFunctionalize func_guard; |
6289 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6290 | at::_ops::amax_out::call(self_meta, dim, keepdim, out_meta); |
6291 | } |
6292 | |
6293 | at::Tensor self_; |
6294 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
6295 | at::functionalization::impl::sync(self); |
6296 | self_ = at::functionalization::impl::from_functional_tensor(self); |
6297 | } else { |
6298 | self_ = self; |
6299 | } |
6300 | |
6301 | at::Tensor out_; |
6302 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
6303 | at::functionalization::impl::sync(out); |
6304 | out_ = at::functionalization::impl::from_functional_tensor(out); |
6305 | } else { |
6306 | out_ = out; |
6307 | } |
6308 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
6309 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
} else {
// case 2: arguments are not functional tensors, so we no-op and redispatch.
at::AutoDispatchSkipFunctionalize guard;
at::Tensor tmp_output = at::_ops::amax_out::call(self_, dim, keepdim, out_);
return out;
6319 | } |
6320 | } else { |
6321 | at::Tensor tmp_output; |
6322 | { |
6323 | at::AutoDispatchSkipFunctionalize guard; |
6324 | tmp_output = at::_ops::amax::call(self_, dim, keepdim); |
6325 | } |
6326 | at::functionalization::impl::replace_(out, tmp_output); |
6327 | at::functionalization::impl::commit_update(out); |
6328 | at::functionalization::impl::sync(out); |
6329 | return out; |
6330 | } |
6331 | } |
6332 | |
6333 | at::Tensor & mkldnn_max_pool2d_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) { |
6334 | if (false) { |
6335 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
// This helps us catch shape errors that the mutable op would raise but its functional variant would not.
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
6338 | auto grad_output_meta = to_meta(grad_output); |
6339 | auto output_meta = to_meta(output); |
6340 | auto input_meta = to_meta(input); |
6341 | auto out_meta = to_meta(out); |
6342 | at::AutoDispatchSkipFunctionalize func_guard; |
6343 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6344 | at::_ops::mkldnn_max_pool2d_backward_out::call(grad_output_meta, output_meta, input_meta, kernel_size, stride, padding, dilation, ceil_mode, out_meta); |
6345 | } |
6346 | |
6347 | at::Tensor grad_output_; |
6348 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
6349 | at::functionalization::impl::sync(grad_output); |
6350 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
6351 | } else { |
6352 | grad_output_ = grad_output; |
6353 | } |
6354 | |
6355 | at::Tensor output_; |
6356 | if (at::functionalization::impl::isFunctionalTensor(output)) { |
6357 | at::functionalization::impl::sync(output); |
6358 | output_ = at::functionalization::impl::from_functional_tensor(output); |
6359 | } else { |
6360 | output_ = output; |
6361 | } |
6362 | |
6363 | at::Tensor input_; |
6364 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
6365 | at::functionalization::impl::sync(input); |
6366 | input_ = at::functionalization::impl::from_functional_tensor(input); |
6367 | } else { |
6368 | input_ = input; |
6369 | } |
6370 | |
6371 | at::Tensor out_; |
6372 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
6373 | at::functionalization::impl::sync(out); |
6374 | out_ = at::functionalization::impl::from_functional_tensor(out); |
6375 | } else { |
6376 | out_ = out; |
6377 | } |
6378 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
6379 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(output) || at::functionalization::impl::isFunctionalTensor(input))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
} else {
// case 2: arguments are not functional tensors, so we no-op and redispatch.
at::AutoDispatchSkipFunctionalize guard;
at::Tensor tmp_output = at::_ops::mkldnn_max_pool2d_backward_out::call(grad_output_, output_, input_, kernel_size, stride, padding, dilation, ceil_mode, out_);
return out;
6389 | } |
6390 | } else { |
6391 | at::Tensor tmp_output; |
6392 | { |
6393 | at::AutoDispatchSkipFunctionalize guard; |
6394 | tmp_output = at::_ops::mkldnn_max_pool2d_backward::call(grad_output_, output_, input_, kernel_size, stride, padding, dilation, ceil_mode); |
6395 | } |
6396 | at::functionalization::impl::replace_(out, tmp_output); |
6397 | at::functionalization::impl::commit_update(out); |
6398 | at::functionalization::impl::sync(out); |
6399 | return out; |
6400 | } |
6401 | } |
6402 | |
6403 | at::Tensor & mkldnn_max_pool3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) { |
6404 | if (false) { |
6405 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
// This helps us catch shape errors that the mutable op would raise but its functional variant would not.
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
6408 | auto self_meta = to_meta(self); |
6409 | auto out_meta = to_meta(out); |
6410 | at::AutoDispatchSkipFunctionalize func_guard; |
6411 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6412 | at::_ops::mkldnn_max_pool3d_out::call(self_meta, kernel_size, stride, padding, dilation, ceil_mode, out_meta); |
6413 | } |
6414 | |
6415 | at::Tensor self_; |
6416 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
6417 | at::functionalization::impl::sync(self); |
6418 | self_ = at::functionalization::impl::from_functional_tensor(self); |
6419 | } else { |
6420 | self_ = self; |
6421 | } |
6422 | |
6423 | at::Tensor out_; |
6424 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
6425 | at::functionalization::impl::sync(out); |
6426 | out_ = at::functionalization::impl::from_functional_tensor(out); |
6427 | } else { |
6428 | out_ = out; |
6429 | } |
6430 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
6431 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
} else {
// case 2: arguments are not functional tensors, so we no-op and redispatch.
at::AutoDispatchSkipFunctionalize guard;
at::Tensor tmp_output = at::_ops::mkldnn_max_pool3d_out::call(self_, kernel_size, stride, padding, dilation, ceil_mode, out_);
return out;
6441 | } |
6442 | } else { |
6443 | at::Tensor tmp_output; |
6444 | { |
6445 | at::AutoDispatchSkipFunctionalize guard; |
6446 | tmp_output = at::_ops::mkldnn_max_pool3d::call(self_, kernel_size, stride, padding, dilation, ceil_mode); |
6447 | } |
6448 | at::functionalization::impl::replace_(out, tmp_output); |
6449 | at::functionalization::impl::commit_update(out); |
6450 | at::functionalization::impl::sync(out); |
6451 | return out; |
6452 | } |
6453 | } |
6454 | |
6455 | at::Tensor & quantized_max_pool2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) { |
6456 | if (false) { |
6457 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
// This helps us catch shape errors that the mutable op would raise but its functional variant would not.
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
6460 | auto self_meta = to_meta(self); |
6461 | auto out_meta = to_meta(out); |
6462 | at::AutoDispatchSkipFunctionalize func_guard; |
6463 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6464 | at::_ops::quantized_max_pool2d_out::call(self_meta, kernel_size, stride, padding, dilation, ceil_mode, out_meta); |
6465 | } |
6466 | |
6467 | at::Tensor self_; |
6468 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
6469 | at::functionalization::impl::sync(self); |
6470 | self_ = at::functionalization::impl::from_functional_tensor(self); |
6471 | } else { |
6472 | self_ = self; |
6473 | } |
6474 | |
6475 | at::Tensor out_; |
6476 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
6477 | at::functionalization::impl::sync(out); |
6478 | out_ = at::functionalization::impl::from_functional_tensor(out); |
6479 | } else { |
6480 | out_ = out; |
6481 | } |
6482 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
6483 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
} else {
// case 2: arguments are not functional tensors, so we no-op and redispatch.
at::AutoDispatchSkipFunctionalize guard;
at::Tensor tmp_output = at::_ops::quantized_max_pool2d_out::call(self_, kernel_size, stride, padding, dilation, ceil_mode, out_);
return out;
6493 | } |
6494 | } else { |
6495 | at::Tensor tmp_output; |
6496 | { |
6497 | at::AutoDispatchSkipFunctionalize guard; |
6498 | tmp_output = at::_ops::quantized_max_pool2d::call(self_, kernel_size, stride, padding, dilation, ceil_mode); |
6499 | } |
6500 | at::functionalization::impl::replace_(out, tmp_output); |
6501 | at::functionalization::impl::commit_update(out); |
6502 | at::functionalization::impl::sync(out); |
6503 | return out; |
6504 | } |
6505 | } |
6506 | |
6507 | at::Tensor & mean_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) { |
6508 | if (false) { |
6509 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
// This helps us catch shape errors that the mutable op would raise but its functional variant would not.
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
6512 | auto self_meta = to_meta(self); |
6513 | auto out_meta = to_meta(out); |
6514 | at::AutoDispatchSkipFunctionalize func_guard; |
6515 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6516 | at::_ops::mean_out::call(self_meta, dim, keepdim, dtype, out_meta); |
6517 | } |
6518 | |
6519 | at::Tensor self_; |
6520 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
6521 | at::functionalization::impl::sync(self); |
6522 | self_ = at::functionalization::impl::from_functional_tensor(self); |
6523 | } else { |
6524 | self_ = self; |
6525 | } |
6526 | |
6527 | at::Tensor out_; |
6528 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
6529 | at::functionalization::impl::sync(out); |
6530 | out_ = at::functionalization::impl::from_functional_tensor(out); |
6531 | } else { |
6532 | out_ = out; |
6533 | } |
6534 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
6535 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
} else {
// case 2: arguments are not functional tensors, so we no-op and redispatch.
at::AutoDispatchSkipFunctionalize guard;
at::Tensor tmp_output = at::_ops::mean_out::call(self_, dim, keepdim, dtype, out_);
return out;
6545 | } |
6546 | } else { |
6547 | at::Tensor tmp_output; |
6548 | { |
6549 | at::AutoDispatchSkipFunctionalize guard; |
6550 | tmp_output = at::_ops::mean_dim::call(self_, dim, keepdim, dtype); |
6551 | } |
6552 | at::functionalization::impl::replace_(out, tmp_output); |
6553 | at::functionalization::impl::commit_update(out); |
6554 | at::functionalization::impl::sync(out); |
6555 | return out; |
6556 | } |
6557 | } |
6558 | |
6559 | at::Tensor & mean_out_names_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) { |
6560 | if (false) { |
6561 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
// This helps us catch shape errors that the mutable op would raise but its functional variant would not.
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
6564 | auto self_meta = to_meta(self); |
6565 | auto out_meta = to_meta(out); |
6566 | at::AutoDispatchSkipFunctionalize func_guard; |
6567 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6568 | at::_ops::mean_names_out::call(self_meta, dim, keepdim, dtype, out_meta); |
6569 | } |
6570 | |
6571 | at::Tensor self_; |
6572 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
6573 | at::functionalization::impl::sync(self); |
6574 | self_ = at::functionalization::impl::from_functional_tensor(self); |
6575 | } else { |
6576 | self_ = self; |
6577 | } |
6578 | |
6579 | at::Tensor out_; |
6580 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
6581 | at::functionalization::impl::sync(out); |
6582 | out_ = at::functionalization::impl::from_functional_tensor(out); |
6583 | } else { |
6584 | out_ = out; |
6585 | } |
6586 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
6587 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
} else {
// case 2: arguments are not functional tensors, so we no-op and redispatch.
at::AutoDispatchSkipFunctionalize guard;
at::Tensor tmp_output = at::_ops::mean_names_out::call(self_, dim, keepdim, dtype, out_);
return out;
6597 | } |
6598 | } else { |
6599 | at::Tensor tmp_output; |
6600 | { |
6601 | at::AutoDispatchSkipFunctionalize guard; |
6602 | tmp_output = at::_ops::mean_names_dim::call(self_, dim, keepdim, dtype); |
6603 | } |
6604 | at::functionalization::impl::replace_(out, tmp_output); |
6605 | at::functionalization::impl::commit_update(out); |
6606 | at::functionalization::impl::sync(out); |
6607 | return out; |
6608 | } |
6609 | } |
6610 | |
6611 | at::Tensor & nanmedian_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
6612 | if (false) { |
6613 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
// This helps us catch shape errors that the mutable op would raise but its functional variant would not.
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
6616 | auto self_meta = to_meta(self); |
6617 | auto out_meta = to_meta(out); |
6618 | at::AutoDispatchSkipFunctionalize func_guard; |
6619 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6620 | at::_ops::nanmedian_out::call(self_meta, out_meta); |
6621 | } |
6622 | |
6623 | at::Tensor self_; |
6624 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
6625 | at::functionalization::impl::sync(self); |
6626 | self_ = at::functionalization::impl::from_functional_tensor(self); |
6627 | } else { |
6628 | self_ = self; |
6629 | } |
6630 | |
6631 | at::Tensor out_; |
6632 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
6633 | at::functionalization::impl::sync(out); |
6634 | out_ = at::functionalization::impl::from_functional_tensor(out); |
6635 | } else { |
6636 | out_ = out; |
6637 | } |
6638 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
6639 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
} else {
// case 2: arguments are not functional tensors, so we no-op and redispatch.
at::AutoDispatchSkipFunctionalize guard;
at::Tensor tmp_output = at::_ops::nanmedian_out::call(self_, out_);
return out;
6649 | } |
6650 | } else { |
6651 | at::Tensor tmp_output; |
6652 | { |
6653 | at::AutoDispatchSkipFunctionalize guard; |
6654 | tmp_output = at::_ops::nanmedian::call(self_); |
6655 | } |
6656 | at::functionalization::impl::replace_(out, tmp_output); |
6657 | at::functionalization::impl::commit_update(out); |
6658 | at::functionalization::impl::sync(out); |
6659 | return out; |
6660 | } |
6661 | } |
6662 | |
6663 | ::std::tuple<at::Tensor &,at::Tensor &> nanmedian_out_dim_values(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) { |
6664 | if (false) { |
6665 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
// This helps us catch shape errors that the mutable op would raise but its functional variant would not.
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
6668 | auto self_meta = to_meta(self); |
6669 | auto values_meta = to_meta(values); |
6670 | auto indices_meta = to_meta(indices); |
6671 | at::AutoDispatchSkipFunctionalize func_guard; |
6672 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6673 | at::_ops::nanmedian_dim_values::call(self_meta, dim, keepdim, values_meta, indices_meta); |
6674 | } |
6675 | |
6676 | at::Tensor self_; |
6677 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
6678 | at::functionalization::impl::sync(self); |
6679 | self_ = at::functionalization::impl::from_functional_tensor(self); |
6680 | } else { |
6681 | self_ = self; |
6682 | } |
6683 | |
6684 | at::Tensor values_; |
6685 | if (at::functionalization::impl::isFunctionalTensor(values)) { |
6686 | at::functionalization::impl::sync(values); |
6687 | values_ = at::functionalization::impl::from_functional_tensor(values); |
6688 | } else { |
6689 | values_ = values; |
6690 | } |
6691 | |
6692 | at::Tensor indices_; |
6693 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
6694 | at::functionalization::impl::sync(indices); |
6695 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
6696 | } else { |
6697 | indices_ = indices; |
6698 | } |
6699 | if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) { |
6700 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
} else {
// case 2: arguments are not functional tensors, so we no-op and redispatch.
at::AutoDispatchSkipFunctionalize guard;
::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::nanmedian_dim_values::call(self_, dim, keepdim, values_, indices_);
return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
6710 | } |
6711 | } else { |
6712 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
6713 | { |
6714 | at::AutoDispatchSkipFunctionalize guard; |
6715 | tmp_output = at::_ops::nanmedian_dim::call(self_, dim, keepdim); |
6716 | } |
6717 | at::functionalization::impl::replace_(values, std::get<0>(tmp_output)); |
6718 | at::functionalization::impl::commit_update(values); |
6719 | at::functionalization::impl::sync(values); |
6720 | at::functionalization::impl::replace_(indices, std::get<1>(tmp_output)); |
6721 | at::functionalization::impl::commit_update(indices); |
6722 | at::functionalization::impl::sync(indices); |
6723 | return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices); |
6724 | } |
6725 | } |
6726 | |
6727 | ::std::tuple<at::Tensor &,at::Tensor &> nanmedian_out_names_dim_values(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) { |
6728 | if (false) { |
6729 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors.)
6732 | auto self_meta = to_meta(self); |
6733 | auto values_meta = to_meta(values); |
6734 | auto indices_meta = to_meta(indices); |
6735 | at::AutoDispatchSkipFunctionalize func_guard; |
6736 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6737 | at::_ops::nanmedian_names_dim_values::call(self_meta, dim, keepdim, values_meta, indices_meta); |
6738 | } |
6739 | |
6740 | at::Tensor self_; |
6741 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
6742 | at::functionalization::impl::sync(self); |
6743 | self_ = at::functionalization::impl::from_functional_tensor(self); |
6744 | } else { |
6745 | self_ = self; |
6746 | } |
6747 | |
6748 | at::Tensor values_; |
6749 | if (at::functionalization::impl::isFunctionalTensor(values)) { |
6750 | at::functionalization::impl::sync(values); |
6751 | values_ = at::functionalization::impl::from_functional_tensor(values); |
6752 | } else { |
6753 | values_ = values; |
6754 | } |
6755 | |
6756 | at::Tensor indices_; |
6757 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
6758 | at::functionalization::impl::sync(indices); |
6759 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
6760 | } else { |
6761 | indices_ = indices; |
6762 | } |
6763 | if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) { |
6764 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
6769 | } else { |
6770 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
6771 | at::AutoDispatchSkipFunctionalize guard; |
6772 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::nanmedian_names_dim_values::call(self_, dim, keepdim, values_, indices_); |
      return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
6774 | } |
6775 | } else { |
6776 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
6777 | { |
6778 | at::AutoDispatchSkipFunctionalize guard; |
6779 | tmp_output = at::_ops::nanmedian_names_dim::call(self_, dim, keepdim); |
6780 | } |
6781 | at::functionalization::impl::replace_(values, std::get<0>(tmp_output)); |
6782 | at::functionalization::impl::commit_update(values); |
6783 | at::functionalization::impl::sync(values); |
6784 | at::functionalization::impl::replace_(indices, std::get<1>(tmp_output)); |
6785 | at::functionalization::impl::commit_update(indices); |
6786 | at::functionalization::impl::sync(indices); |
6787 | return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices); |
6788 | } |
6789 | } |
6790 | |
6791 | ::std::tuple<at::Tensor &,at::Tensor &> mode_out_values(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) { |
6792 | if (false) { |
6793 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors.)
6796 | auto self_meta = to_meta(self); |
6797 | auto values_meta = to_meta(values); |
6798 | auto indices_meta = to_meta(indices); |
6799 | at::AutoDispatchSkipFunctionalize func_guard; |
6800 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6801 | at::_ops::mode_values::call(self_meta, dim, keepdim, values_meta, indices_meta); |
6802 | } |
6803 | |
6804 | at::Tensor self_; |
6805 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
6806 | at::functionalization::impl::sync(self); |
6807 | self_ = at::functionalization::impl::from_functional_tensor(self); |
6808 | } else { |
6809 | self_ = self; |
6810 | } |
6811 | |
6812 | at::Tensor values_; |
6813 | if (at::functionalization::impl::isFunctionalTensor(values)) { |
6814 | at::functionalization::impl::sync(values); |
6815 | values_ = at::functionalization::impl::from_functional_tensor(values); |
6816 | } else { |
6817 | values_ = values; |
6818 | } |
6819 | |
6820 | at::Tensor indices_; |
6821 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
6822 | at::functionalization::impl::sync(indices); |
6823 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
6824 | } else { |
6825 | indices_ = indices; |
6826 | } |
6827 | if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) { |
6828 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
6833 | } else { |
6834 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
6835 | at::AutoDispatchSkipFunctionalize guard; |
6836 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::mode_values::call(self_, dim, keepdim, values_, indices_); |
      return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
6838 | } |
6839 | } else { |
6840 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
6841 | { |
6842 | at::AutoDispatchSkipFunctionalize guard; |
6843 | tmp_output = at::_ops::mode::call(self_, dim, keepdim); |
6844 | } |
6845 | at::functionalization::impl::replace_(values, std::get<0>(tmp_output)); |
6846 | at::functionalization::impl::commit_update(values); |
6847 | at::functionalization::impl::sync(values); |
6848 | at::functionalization::impl::replace_(indices, std::get<1>(tmp_output)); |
6849 | at::functionalization::impl::commit_update(indices); |
6850 | at::functionalization::impl::sync(indices); |
6851 | return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices); |
6852 | } |
6853 | } |
6854 | |
6855 | ::std::tuple<at::Tensor &,at::Tensor &> mode_out_dimname_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) { |
6856 | if (false) { |
6857 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors.)
6860 | auto self_meta = to_meta(self); |
6861 | auto values_meta = to_meta(values); |
6862 | auto indices_meta = to_meta(indices); |
6863 | at::AutoDispatchSkipFunctionalize func_guard; |
6864 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6865 | at::_ops::mode_dimname_out::call(self_meta, dim, keepdim, values_meta, indices_meta); |
6866 | } |
6867 | |
6868 | at::Tensor self_; |
6869 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
6870 | at::functionalization::impl::sync(self); |
6871 | self_ = at::functionalization::impl::from_functional_tensor(self); |
6872 | } else { |
6873 | self_ = self; |
6874 | } |
6875 | |
6876 | at::Tensor values_; |
6877 | if (at::functionalization::impl::isFunctionalTensor(values)) { |
6878 | at::functionalization::impl::sync(values); |
6879 | values_ = at::functionalization::impl::from_functional_tensor(values); |
6880 | } else { |
6881 | values_ = values; |
6882 | } |
6883 | |
6884 | at::Tensor indices_; |
6885 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
6886 | at::functionalization::impl::sync(indices); |
6887 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
6888 | } else { |
6889 | indices_ = indices; |
6890 | } |
6891 | if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) { |
6892 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
6897 | } else { |
6898 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
6899 | at::AutoDispatchSkipFunctionalize guard; |
6900 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::mode_dimname_out::call(self_, dim, keepdim, values_, indices_); |
      return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
6902 | } |
6903 | } else { |
6904 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
6905 | { |
6906 | at::AutoDispatchSkipFunctionalize guard; |
6907 | tmp_output = at::_ops::mode_dimname::call(self_, dim, keepdim); |
6908 | } |
6909 | at::functionalization::impl::replace_(values, std::get<0>(tmp_output)); |
6910 | at::functionalization::impl::commit_update(values); |
6911 | at::functionalization::impl::sync(values); |
6912 | at::functionalization::impl::replace_(indices, std::get<1>(tmp_output)); |
6913 | at::functionalization::impl::commit_update(indices); |
6914 | at::functionalization::impl::sync(indices); |
6915 | return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices); |
6916 | } |
6917 | } |
6918 | |
6919 | at::Tensor & multiply_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
6920 | if (false) { |
6921 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors.)
6924 | auto self_meta = to_meta(self); |
6925 | auto other_meta = to_meta(other); |
6926 | auto out_meta = to_meta(out); |
6927 | at::AutoDispatchSkipFunctionalize func_guard; |
6928 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6929 | at::_ops::multiply_out::call(self_meta, other_meta, out_meta); |
6930 | } |
6931 | |
6932 | at::Tensor self_; |
6933 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
6934 | at::functionalization::impl::sync(self); |
6935 | self_ = at::functionalization::impl::from_functional_tensor(self); |
6936 | } else { |
6937 | self_ = self; |
6938 | } |
6939 | |
6940 | at::Tensor other_; |
6941 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
6942 | at::functionalization::impl::sync(other); |
6943 | other_ = at::functionalization::impl::from_functional_tensor(other); |
6944 | } else { |
6945 | other_ = other; |
6946 | } |
6947 | |
6948 | at::Tensor out_; |
6949 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
6950 | at::functionalization::impl::sync(out); |
6951 | out_ = at::functionalization::impl::from_functional_tensor(out); |
6952 | } else { |
6953 | out_ = out; |
6954 | } |
6955 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
6956 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
6961 | } else { |
6962 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
6963 | at::AutoDispatchSkipFunctionalize guard; |
6964 | at::Tensor tmp_output = at::_ops::multiply_out::call(self_, other_, out_); |
      return out;
6966 | } |
6967 | } else { |
6968 | at::Tensor tmp_output; |
6969 | { |
6970 | at::AutoDispatchSkipFunctionalize guard; |
6971 | tmp_output = at::_ops::multiply_Tensor::call(self_, other_); |
6972 | } |
6973 | at::functionalization::impl::replace_(out, tmp_output); |
6974 | at::functionalization::impl::commit_update(out); |
6975 | at::functionalization::impl::sync(out); |
6976 | return out; |
6977 | } |
6978 | } |
6979 | |
6980 | at::Tensor & multiply__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { |
6981 | if (true) { |
6982 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors.)
6985 | auto self_meta = to_meta(self); |
6986 | auto other_meta = to_meta(other); |
6987 | at::AutoDispatchSkipFunctionalize func_guard; |
6988 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6989 | at::_ops::multiply__Tensor::call(self_meta, other_meta); |
6990 | } |
6991 | |
6992 | at::Tensor self_; |
6993 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
6994 | at::functionalization::impl::sync(self); |
6995 | self_ = at::functionalization::impl::from_functional_tensor(self); |
6996 | } else { |
6997 | self_ = self; |
6998 | } |
6999 | |
7000 | at::Tensor other_; |
7001 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
7002 | at::functionalization::impl::sync(other); |
7003 | other_ = at::functionalization::impl::from_functional_tensor(other); |
7004 | } else { |
7005 | other_ = other; |
7006 | } |
7007 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
7008 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
7013 | } else { |
7014 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7015 | at::AutoDispatchSkipFunctionalize guard; |
7016 | at::Tensor tmp_output = at::_ops::multiply__Tensor::call(self_, other_); |
      return self;
7018 | } |
7019 | } else { |
7020 | at::Tensor tmp_output; |
7021 | { |
7022 | at::AutoDispatchSkipFunctionalize guard; |
7023 | tmp_output = at::_ops::multiply_Tensor::call(self_, other_); |
7024 | } |
7025 | at::functionalization::impl::replace_(self, tmp_output); |
7026 | at::functionalization::impl::commit_update(self); |
7027 | at::functionalization::impl::sync(self); |
7028 | return self; |
7029 | } |
7030 | } |
7031 | |
7032 | at::Tensor & narrow_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length, at::Tensor & out) { |
7033 | if (false) { |
7034 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors.)
7037 | auto self_meta = to_meta(self); |
7038 | auto out_meta = to_meta(out); |
7039 | at::AutoDispatchSkipFunctionalize func_guard; |
7040 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7041 | at::_ops::narrow_copy_out::call(self_meta, dim, start, length, out_meta); |
7042 | } |
7043 | |
7044 | at::Tensor self_; |
7045 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7046 | at::functionalization::impl::sync(self); |
7047 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7048 | } else { |
7049 | self_ = self; |
7050 | } |
7051 | |
7052 | at::Tensor out_; |
7053 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7054 | at::functionalization::impl::sync(out); |
7055 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7056 | } else { |
7057 | out_ = out; |
7058 | } |
7059 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7060 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
7065 | } else { |
7066 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7067 | at::AutoDispatchSkipFunctionalize guard; |
7068 | at::Tensor tmp_output = at::_ops::narrow_copy_out::call(self_, dim, start, length, out_); |
      return out;
7070 | } |
7071 | } else { |
7072 | at::Tensor tmp_output; |
7073 | { |
7074 | at::AutoDispatchSkipFunctionalize guard; |
7075 | tmp_output = at::_ops::narrow_copy::call(self_, dim, start, length); |
7076 | } |
7077 | at::functionalization::impl::replace_(out, tmp_output); |
7078 | at::functionalization::impl::commit_update(out); |
7079 | at::functionalization::impl::sync(out); |
7080 | return out; |
7081 | } |
7082 | } |
7083 | |
7084 | ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, int64_t count, at::Tensor & out0, at::Tensor & out1) { |
7085 | if (false) { |
7086 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors.)
7089 | auto input_meta = to_meta(input); |
7090 | auto mean_meta = to_meta(mean); |
7091 | auto invstd_meta = to_meta(invstd); |
7092 | auto running_mean_meta = to_meta(running_mean); |
7093 | auto running_var_meta = to_meta(running_var); |
7094 | auto out0_meta = to_meta(out0); |
7095 | auto out1_meta = to_meta(out1); |
7096 | at::AutoDispatchSkipFunctionalize func_guard; |
7097 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7098 | at::_ops::batch_norm_gather_stats_out::call(input_meta, mean_meta, invstd_meta, running_mean_meta, running_var_meta, momentum, eps, count, out0_meta, out1_meta); |
7099 | } |
7100 | |
7101 | at::Tensor input_; |
7102 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
7103 | at::functionalization::impl::sync(input); |
7104 | input_ = at::functionalization::impl::from_functional_tensor(input); |
7105 | } else { |
7106 | input_ = input; |
7107 | } |
7108 | |
7109 | at::Tensor mean_; |
7110 | if (at::functionalization::impl::isFunctionalTensor(mean)) { |
7111 | at::functionalization::impl::sync(mean); |
7112 | mean_ = at::functionalization::impl::from_functional_tensor(mean); |
7113 | } else { |
7114 | mean_ = mean; |
7115 | } |
7116 | |
7117 | at::Tensor invstd_; |
7118 | if (at::functionalization::impl::isFunctionalTensor(invstd)) { |
7119 | at::functionalization::impl::sync(invstd); |
7120 | invstd_ = at::functionalization::impl::from_functional_tensor(invstd); |
7121 | } else { |
7122 | invstd_ = invstd; |
7123 | } |
7124 | |
7125 | c10::optional<at::Tensor> running_mean_; |
7126 | if (at::functionalization::impl::isFunctionalTensor(running_mean)) { |
7127 | at::functionalization::impl::sync(running_mean); |
7128 | running_mean_ = at::functionalization::impl::from_functional_tensor(running_mean); |
7129 | } else { |
7130 | running_mean_ = running_mean; |
7131 | } |
7132 | |
7133 | c10::optional<at::Tensor> running_var_; |
7134 | if (at::functionalization::impl::isFunctionalTensor(running_var)) { |
7135 | at::functionalization::impl::sync(running_var); |
7136 | running_var_ = at::functionalization::impl::from_functional_tensor(running_var); |
7137 | } else { |
7138 | running_var_ = running_var; |
7139 | } |
7140 | |
7141 | at::Tensor out0_; |
7142 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
7143 | at::functionalization::impl::sync(out0); |
7144 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
7145 | } else { |
7146 | out0_ = out0; |
7147 | } |
7148 | |
7149 | at::Tensor out1_; |
7150 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
7151 | at::functionalization::impl::sync(out1); |
7152 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
7153 | } else { |
7154 | out1_ = out1; |
7155 | } |
7156 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) { |
7157 | if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(mean) || at::functionalization::impl::isFunctionalTensor(invstd) || at::functionalization::impl::isFunctionalTensor(running_mean) || at::functionalization::impl::isFunctionalTensor(running_var))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
7162 | } else { |
7163 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7164 | at::AutoDispatchSkipFunctionalize guard; |
7165 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::batch_norm_gather_stats_out::call(input_, mean_, invstd_, running_mean_, running_var_, momentum, eps, count, out0_, out1_); |
      return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
7167 | } |
7168 | } else { |
7169 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
7170 | { |
7171 | at::AutoDispatchSkipFunctionalize guard; |
7172 | tmp_output = at::_ops::batch_norm_gather_stats::call(input_, mean_, invstd_, running_mean_, running_var_, momentum, eps, count); |
7173 | } |
7174 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
7175 | at::functionalization::impl::commit_update(out0); |
7176 | at::functionalization::impl::sync(out0); |
7177 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
7178 | at::functionalization::impl::commit_update(out1); |
7179 | at::functionalization::impl::sync(out1); |
7180 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
7181 | } |
7182 | } |
7183 | |
7184 | ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_with_counts_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts, at::Tensor & out0, at::Tensor & out1) { |
7185 | if (false) { |
7186 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors.)
7189 | auto input_meta = to_meta(input); |
7190 | auto mean_meta = to_meta(mean); |
7191 | auto invstd_meta = to_meta(invstd); |
7192 | auto running_mean_meta = to_meta(running_mean); |
7193 | auto running_var_meta = to_meta(running_var); |
7194 | auto counts_meta = to_meta(counts); |
7195 | auto out0_meta = to_meta(out0); |
7196 | auto out1_meta = to_meta(out1); |
7197 | at::AutoDispatchSkipFunctionalize func_guard; |
7198 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7199 | at::_ops::batch_norm_gather_stats_with_counts_out::call(input_meta, mean_meta, invstd_meta, running_mean_meta, running_var_meta, momentum, eps, counts_meta, out0_meta, out1_meta); |
7200 | } |
7201 | |
7202 | at::Tensor input_; |
7203 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
7204 | at::functionalization::impl::sync(input); |
7205 | input_ = at::functionalization::impl::from_functional_tensor(input); |
7206 | } else { |
7207 | input_ = input; |
7208 | } |
7209 | |
7210 | at::Tensor mean_; |
7211 | if (at::functionalization::impl::isFunctionalTensor(mean)) { |
7212 | at::functionalization::impl::sync(mean); |
7213 | mean_ = at::functionalization::impl::from_functional_tensor(mean); |
7214 | } else { |
7215 | mean_ = mean; |
7216 | } |
7217 | |
7218 | at::Tensor invstd_; |
7219 | if (at::functionalization::impl::isFunctionalTensor(invstd)) { |
7220 | at::functionalization::impl::sync(invstd); |
7221 | invstd_ = at::functionalization::impl::from_functional_tensor(invstd); |
7222 | } else { |
7223 | invstd_ = invstd; |
7224 | } |
7225 | |
7226 | c10::optional<at::Tensor> running_mean_; |
7227 | if (at::functionalization::impl::isFunctionalTensor(running_mean)) { |
7228 | at::functionalization::impl::sync(running_mean); |
7229 | running_mean_ = at::functionalization::impl::from_functional_tensor(running_mean); |
7230 | } else { |
7231 | running_mean_ = running_mean; |
7232 | } |
7233 | |
7234 | c10::optional<at::Tensor> running_var_; |
7235 | if (at::functionalization::impl::isFunctionalTensor(running_var)) { |
7236 | at::functionalization::impl::sync(running_var); |
7237 | running_var_ = at::functionalization::impl::from_functional_tensor(running_var); |
7238 | } else { |
7239 | running_var_ = running_var; |
7240 | } |
7241 | |
7242 | at::Tensor counts_; |
7243 | if (at::functionalization::impl::isFunctionalTensor(counts)) { |
7244 | at::functionalization::impl::sync(counts); |
7245 | counts_ = at::functionalization::impl::from_functional_tensor(counts); |
7246 | } else { |
7247 | counts_ = counts; |
7248 | } |
7249 | |
7250 | at::Tensor out0_; |
7251 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
7252 | at::functionalization::impl::sync(out0); |
7253 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
7254 | } else { |
7255 | out0_ = out0; |
7256 | } |
7257 | |
7258 | at::Tensor out1_; |
7259 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
7260 | at::functionalization::impl::sync(out1); |
7261 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
7262 | } else { |
7263 | out1_ = out1; |
7264 | } |
7265 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) { |
7266 | if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(mean) || at::functionalization::impl::isFunctionalTensor(invstd) || at::functionalization::impl::isFunctionalTensor(running_mean) || at::functionalization::impl::isFunctionalTensor(running_var) || at::functionalization::impl::isFunctionalTensor(counts))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
7271 | } else { |
7272 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7273 | at::AutoDispatchSkipFunctionalize guard; |
7274 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::batch_norm_gather_stats_with_counts_out::call(input_, mean_, invstd_, running_mean_, running_var_, momentum, eps, counts_, out0_, out1_); |
      return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
7276 | } |
7277 | } else { |
7278 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
7279 | { |
7280 | at::AutoDispatchSkipFunctionalize guard; |
7281 | tmp_output = at::_ops::batch_norm_gather_stats_with_counts::call(input_, mean_, invstd_, running_mean_, running_var_, momentum, eps, counts_); |
7282 | } |
7283 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
7284 | at::functionalization::impl::commit_update(out0); |
7285 | at::functionalization::impl::sync(out0); |
7286 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
7287 | at::functionalization::impl::commit_update(out1); |
7288 | at::functionalization::impl::sync(out1); |
7289 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
7290 | } |
7291 | } |
7292 | |
7293 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_batch_norm_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
7294 | if (false) { |
7295 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors.)
7298 | auto grad_out_meta = to_meta(grad_out); |
7299 | auto input_meta = to_meta(input); |
7300 | auto weight_meta = to_meta(weight); |
7301 | auto running_mean_meta = to_meta(running_mean); |
7302 | auto running_var_meta = to_meta(running_var); |
7303 | auto save_mean_meta = to_meta(save_mean); |
7304 | auto save_invstd_meta = to_meta(save_invstd); |
7305 | auto out0_meta = to_meta(out0); |
7306 | auto out1_meta = to_meta(out1); |
7307 | auto out2_meta = to_meta(out2); |
7308 | at::AutoDispatchSkipFunctionalize func_guard; |
7309 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7310 | at::_ops::native_batch_norm_backward_out::call(grad_out_meta, input_meta, weight_meta, running_mean_meta, running_var_meta, save_mean_meta, save_invstd_meta, train, eps, output_mask, out0_meta, out1_meta, out2_meta); |
7311 | } |
7312 | |
7313 | at::Tensor grad_out_; |
7314 | if (at::functionalization::impl::isFunctionalTensor(grad_out)) { |
7315 | at::functionalization::impl::sync(grad_out); |
7316 | grad_out_ = at::functionalization::impl::from_functional_tensor(grad_out); |
7317 | } else { |
7318 | grad_out_ = grad_out; |
7319 | } |
7320 | |
7321 | at::Tensor input_; |
7322 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
7323 | at::functionalization::impl::sync(input); |
7324 | input_ = at::functionalization::impl::from_functional_tensor(input); |
7325 | } else { |
7326 | input_ = input; |
7327 | } |
7328 | |
7329 | c10::optional<at::Tensor> weight_; |
7330 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
7331 | at::functionalization::impl::sync(weight); |
7332 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
7333 | } else { |
7334 | weight_ = weight; |
7335 | } |
7336 | |
7337 | c10::optional<at::Tensor> running_mean_; |
7338 | if (at::functionalization::impl::isFunctionalTensor(running_mean)) { |
7339 | at::functionalization::impl::sync(running_mean); |
7340 | running_mean_ = at::functionalization::impl::from_functional_tensor(running_mean); |
7341 | } else { |
7342 | running_mean_ = running_mean; |
7343 | } |
7344 | |
7345 | c10::optional<at::Tensor> running_var_; |
7346 | if (at::functionalization::impl::isFunctionalTensor(running_var)) { |
7347 | at::functionalization::impl::sync(running_var); |
7348 | running_var_ = at::functionalization::impl::from_functional_tensor(running_var); |
7349 | } else { |
7350 | running_var_ = running_var; |
7351 | } |
7352 | |
7353 | c10::optional<at::Tensor> save_mean_; |
7354 | if (at::functionalization::impl::isFunctionalTensor(save_mean)) { |
7355 | at::functionalization::impl::sync(save_mean); |
7356 | save_mean_ = at::functionalization::impl::from_functional_tensor(save_mean); |
7357 | } else { |
7358 | save_mean_ = save_mean; |
7359 | } |
7360 | |
7361 | c10::optional<at::Tensor> save_invstd_; |
7362 | if (at::functionalization::impl::isFunctionalTensor(save_invstd)) { |
7363 | at::functionalization::impl::sync(save_invstd); |
7364 | save_invstd_ = at::functionalization::impl::from_functional_tensor(save_invstd); |
7365 | } else { |
7366 | save_invstd_ = save_invstd; |
7367 | } |
7368 | |
7369 | at::Tensor out0_; |
7370 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
7371 | at::functionalization::impl::sync(out0); |
7372 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
7373 | } else { |
7374 | out0_ = out0; |
7375 | } |
7376 | |
7377 | at::Tensor out1_; |
7378 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
7379 | at::functionalization::impl::sync(out1); |
7380 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
7381 | } else { |
7382 | out1_ = out1; |
7383 | } |
7384 | |
7385 | at::Tensor out2_; |
7386 | if (at::functionalization::impl::isFunctionalTensor(out2)) { |
7387 | at::functionalization::impl::sync(out2); |
7388 | out2_ = at::functionalization::impl::from_functional_tensor(out2); |
7389 | } else { |
7390 | out2_ = out2; |
7391 | } |
7392 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) { |
7393 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_out) || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(running_mean) || at::functionalization::impl::isFunctionalTensor(running_var) || at::functionalization::impl::isFunctionalTensor(save_mean) || at::functionalization::impl::isFunctionalTensor(save_invstd))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
7398 | } else { |
7399 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7400 | at::AutoDispatchSkipFunctionalize guard; |
7401 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::native_batch_norm_backward_out::call(grad_out_, input_, weight_, running_mean_, running_var_, save_mean_, save_invstd_, train, eps, output_mask, out0_, out1_, out2_); |
      return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
7403 | } |
7404 | } else { |
7405 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output; |
7406 | { |
7407 | at::AutoDispatchSkipFunctionalize guard; |
7408 | tmp_output = at::_ops::native_batch_norm_backward::call(grad_out_, input_, weight_, running_mean_, running_var_, save_mean_, save_invstd_, train, eps, output_mask); |
7409 | } |
7410 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
7411 | at::functionalization::impl::commit_update(out0); |
7412 | at::functionalization::impl::sync(out0); |
7413 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
7414 | at::functionalization::impl::commit_update(out1); |
7415 | at::functionalization::impl::sync(out1); |
7416 | at::functionalization::impl::replace_(out2, std::get<2>(tmp_output)); |
7417 | at::functionalization::impl::commit_update(out2); |
7418 | at::functionalization::impl::sync(out2); |
7419 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
7420 | } |
7421 | } |
7422 | |
7423 | at::Tensor & ones_out_names_out(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) { |
7424 | if (false) { |
7425 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors.)
7428 | auto out_meta = to_meta(out); |
7429 | at::AutoDispatchSkipFunctionalize func_guard; |
7430 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7431 | at::_ops::ones_names_out::call(size, names, out_meta); |
7432 | } |
7433 | |
7434 | at::Tensor out_; |
7435 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7436 | at::functionalization::impl::sync(out); |
7437 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7438 | } else { |
7439 | out_ = out; |
7440 | } |
7441 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7442 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
7447 | } else { |
7448 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7449 | at::AutoDispatchSkipFunctionalize guard; |
7450 | at::Tensor tmp_output = at::_ops::ones_names_out::call(size, names, out_); |
      return out;
7452 | } |
7453 | } else { |
7454 | at::Tensor tmp_output; |
7455 | { |
7456 | at::AutoDispatchSkipFunctionalize guard; |
7457 | tmp_output = at::_ops::ones_names::call(size, names, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
7458 | } |
7459 | at::functionalization::impl::replace_(out, tmp_output); |
7460 | at::functionalization::impl::commit_update(out); |
7461 | at::functionalization::impl::sync(out); |
7462 | return out; |
7463 | } |
7464 | } |
7465 | |
7466 | at::Tensor & ones_out_out(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::Tensor & out) { |
7467 | if (false) { |
7468 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors.)
7471 | auto out_meta = to_meta(out); |
7472 | at::AutoDispatchSkipFunctionalize func_guard; |
7473 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7474 | at::_ops::ones_out::call(size, out_meta); |
7475 | } |
7476 | |
7477 | at::Tensor out_; |
7478 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7479 | at::functionalization::impl::sync(out); |
7480 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7481 | } else { |
7482 | out_ = out; |
7483 | } |
7484 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7485 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
7490 | } else { |
7491 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7492 | at::AutoDispatchSkipFunctionalize guard; |
7493 | at::Tensor tmp_output = at::_ops::ones_out::call(size, out_); |
      return out;
7495 | } |
7496 | } else { |
7497 | at::Tensor tmp_output; |
7498 | { |
7499 | at::AutoDispatchSkipFunctionalize guard; |
7500 | tmp_output = at::_ops::ones::call(size, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
7501 | } |
7502 | at::functionalization::impl::replace_(out, tmp_output); |
7503 | at::functionalization::impl::commit_update(out); |
7504 | at::functionalization::impl::sync(out); |
7505 | return out; |
7506 | } |
7507 | } |
7508 | |
7509 | at::Tensor & _pdist_forward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, at::Tensor & out) { |
7510 | if (false) { |
7511 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors.)
7514 | auto self_meta = to_meta(self); |
7515 | auto out_meta = to_meta(out); |
7516 | at::AutoDispatchSkipFunctionalize func_guard; |
7517 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7518 | at::_ops::_pdist_forward_out::call(self_meta, p, out_meta); |
7519 | } |
7520 | |
7521 | at::Tensor self_; |
7522 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7523 | at::functionalization::impl::sync(self); |
7524 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7525 | } else { |
7526 | self_ = self; |
7527 | } |
7528 | |
7529 | at::Tensor out_; |
7530 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7531 | at::functionalization::impl::sync(out); |
7532 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7533 | } else { |
7534 | out_ = out; |
7535 | } |
7536 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7537 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
7542 | } else { |
7543 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7544 | at::AutoDispatchSkipFunctionalize guard; |
7545 | at::Tensor tmp_output = at::_ops::_pdist_forward_out::call(self_, p, out_); |
      return out;
7547 | } |
7548 | } else { |
7549 | at::Tensor tmp_output; |
7550 | { |
7551 | at::AutoDispatchSkipFunctionalize guard; |
7552 | tmp_output = at::_ops::_pdist_forward::call(self_, p); |
7553 | } |
7554 | at::functionalization::impl::replace_(out, tmp_output); |
7555 | at::functionalization::impl::commit_update(out); |
7556 | at::functionalization::impl::sync(out); |
7557 | return out; |
7558 | } |
7559 | } |
7560 | |
7561 | at::Tensor & _pdist_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist, at::Tensor & out) { |
7562 | if (false) { |
7563 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors.)
7566 | auto grad_meta = to_meta(grad); |
7567 | auto self_meta = to_meta(self); |
7568 | auto pdist_meta = to_meta(pdist); |
7569 | auto out_meta = to_meta(out); |
7570 | at::AutoDispatchSkipFunctionalize func_guard; |
7571 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7572 | at::_ops::_pdist_backward_out::call(grad_meta, self_meta, p, pdist_meta, out_meta); |
7573 | } |
7574 | |
7575 | at::Tensor grad_; |
7576 | if (at::functionalization::impl::isFunctionalTensor(grad)) { |
7577 | at::functionalization::impl::sync(grad); |
7578 | grad_ = at::functionalization::impl::from_functional_tensor(grad); |
7579 | } else { |
7580 | grad_ = grad; |
7581 | } |
7582 | |
7583 | at::Tensor self_; |
7584 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7585 | at::functionalization::impl::sync(self); |
7586 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7587 | } else { |
7588 | self_ = self; |
7589 | } |
7590 | |
7591 | at::Tensor pdist_; |
7592 | if (at::functionalization::impl::isFunctionalTensor(pdist)) { |
7593 | at::functionalization::impl::sync(pdist); |
7594 | pdist_ = at::functionalization::impl::from_functional_tensor(pdist); |
7595 | } else { |
7596 | pdist_ = pdist; |
7597 | } |
7598 | |
7599 | at::Tensor out_; |
7600 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7601 | at::functionalization::impl::sync(out); |
7602 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7603 | } else { |
7604 | out_ = out; |
7605 | } |
7606 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7607 | if ((false || at::functionalization::impl::isFunctionalTensor(grad) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(pdist))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
7612 | } else { |
7613 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7614 | at::AutoDispatchSkipFunctionalize guard; |
7615 | at::Tensor tmp_output = at::_ops::_pdist_backward_out::call(grad_, self_, p, pdist_, out_); |
      return out;
7617 | } |
7618 | } else { |
7619 | at::Tensor tmp_output; |
7620 | { |
7621 | at::AutoDispatchSkipFunctionalize guard; |
7622 | tmp_output = at::_ops::_pdist_backward::call(grad_, self_, p, pdist_); |
7623 | } |
7624 | at::functionalization::impl::replace_(out, tmp_output); |
7625 | at::functionalization::impl::commit_update(out); |
7626 | at::functionalization::impl::sync(out); |
7627 | return out; |
7628 | } |
7629 | } |
7630 | |
7631 | at::Tensor & pixel_shuffle_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t upscale_factor, at::Tensor & out) { |
7632 | if (false) { |
7633 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7634 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7635 | // (We can only do this for inplace ops today though, because they technicaly all support meta tensors). |
7636 | auto self_meta = to_meta(self); |
7637 | auto out_meta = to_meta(out); |
7638 | at::AutoDispatchSkipFunctionalize func_guard; |
7639 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7640 | at::_ops::pixel_shuffle_out::call(self_meta, upscale_factor, out_meta); |
7641 | } |
7642 | |
7643 | at::Tensor self_; |
7644 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7645 | at::functionalization::impl::sync(self); |
7646 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7647 | } else { |
7648 | self_ = self; |
7649 | } |
7650 | |
7651 | at::Tensor out_; |
7652 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7653 | at::functionalization::impl::sync(out); |
7654 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7655 | } else { |
7656 | out_ = out; |
7657 | } |
7658 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7659 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
7664 | } else { |
7665 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7666 | at::AutoDispatchSkipFunctionalize guard; |
7667 | at::Tensor tmp_output = at::_ops::pixel_shuffle_out::call(self_, upscale_factor, out_); |
      return out;
7669 | } |
7670 | } else { |
7671 | at::Tensor tmp_output; |
7672 | { |
7673 | at::AutoDispatchSkipFunctionalize guard; |
7674 | tmp_output = at::_ops::pixel_shuffle::call(self_, upscale_factor); |
7675 | } |
7676 | at::functionalization::impl::replace_(out, tmp_output); |
7677 | at::functionalization::impl::commit_update(out); |
7678 | at::functionalization::impl::sync(out); |
7679 | return out; |
7680 | } |
7681 | } |
7682 | |
7683 | at::Tensor & _pin_memory_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Device> device, at::Tensor & out) { |
7684 | if (false) { |
7685 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors.)
7688 | auto self_meta = to_meta(self); |
7689 | auto out_meta = to_meta(out); |
7690 | at::AutoDispatchSkipFunctionalize func_guard; |
7691 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7692 | at::_ops::_pin_memory_out::call(self_meta, device, out_meta); |
7693 | } |
7694 | |
7695 | at::Tensor self_; |
7696 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7697 | at::functionalization::impl::sync(self); |
7698 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7699 | } else { |
7700 | self_ = self; |
7701 | } |
7702 | |
7703 | at::Tensor out_; |
7704 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7705 | at::functionalization::impl::sync(out); |
7706 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7707 | } else { |
7708 | out_ = out; |
7709 | } |
7710 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7711 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
7716 | } else { |
7717 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7718 | at::AutoDispatchSkipFunctionalize guard; |
7719 | at::Tensor tmp_output = at::_ops::_pin_memory_out::call(self_, device, out_); |
      return out;
7721 | } |
7722 | } else { |
7723 | at::Tensor tmp_output; |
7724 | { |
7725 | at::AutoDispatchSkipFunctionalize guard; |
7726 | tmp_output = at::_ops::_pin_memory::call(self_, device); |
7727 | } |
7728 | at::functionalization::impl::replace_(out, tmp_output); |
7729 | at::functionalization::impl::commit_update(out); |
7730 | at::functionalization::impl::sync(out); |
7731 | return out; |
7732 | } |
7733 | } |
7734 | |
7735 | at::Tensor & randn_out_out(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::Tensor & out) { |
7736 | if (false) { |
7737 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors.)
7740 | auto out_meta = to_meta(out); |
7741 | at::AutoDispatchSkipFunctionalize func_guard; |
7742 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7743 | at::_ops::randn_out::call(size, out_meta); |
7744 | } |
7745 | |
7746 | at::Tensor out_; |
7747 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7748 | at::functionalization::impl::sync(out); |
7749 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7750 | } else { |
7751 | out_ = out; |
7752 | } |
7753 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7754 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
7759 | } else { |
7760 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7761 | at::AutoDispatchSkipFunctionalize guard; |
7762 | at::Tensor tmp_output = at::_ops::randn_out::call(size, out_); |
      return out;
7764 | } |
7765 | } else { |
7766 | at::Tensor tmp_output; |
7767 | { |
7768 | at::AutoDispatchSkipFunctionalize guard; |
7769 | tmp_output = at::_ops::randn::call(size, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
7770 | } |
7771 | at::functionalization::impl::replace_(out, tmp_output); |
7772 | at::functionalization::impl::commit_update(out); |
7773 | at::functionalization::impl::sync(out); |
7774 | return out; |
7775 | } |
7776 | } |
7777 | |
7778 | at::Tensor & randn_out_generator_out(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) { |
7779 | if (false) { |
7780 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors.)
7783 | auto out_meta = to_meta(out); |
7784 | at::AutoDispatchSkipFunctionalize func_guard; |
7785 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7786 | at::_ops::randn_generator_out::call(size, generator, out_meta); |
7787 | } |
7788 | |
7789 | at::Tensor out_; |
7790 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7791 | at::functionalization::impl::sync(out); |
7792 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7793 | } else { |
7794 | out_ = out; |
7795 | } |
7796 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7797 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
7802 | } else { |
7803 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7804 | at::AutoDispatchSkipFunctionalize guard; |
7805 | at::Tensor tmp_output = at::_ops::randn_generator_out::call(size, generator, out_); |
      return out;
7807 | } |
7808 | } else { |
7809 | at::Tensor tmp_output; |
7810 | { |
7811 | at::AutoDispatchSkipFunctionalize guard; |
7812 | tmp_output = at::_ops::randn_generator::call(size, generator, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
7813 | } |
7814 | at::functionalization::impl::replace_(out, tmp_output); |
7815 | at::functionalization::impl::commit_update(out); |
7816 | at::functionalization::impl::sync(out); |
7817 | return out; |
7818 | } |
7819 | } |
7820 | |
7821 | at::Tensor & randn_out_names_out(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) { |
7822 | if (false) { |
7823 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today, though, because they all technically support meta tensors.)
7826 | auto out_meta = to_meta(out); |
7827 | at::AutoDispatchSkipFunctionalize func_guard; |
7828 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7829 | at::_ops::randn_names_out::call(size, names, out_meta); |
7830 | } |
7831 | |
7832 | at::Tensor out_; |
7833 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7834 | at::functionalization::impl::sync(out); |
7835 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7836 | } else { |
7837 | out_ = out; |
7838 | } |
7839 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7840 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
    } else {
      // case 2: arguments are not functional tensors, so we no-op and redispatch.
      at::AutoDispatchSkipFunctionalize guard;
      at::Tensor tmp_output = at::_ops::randn_names_out::call(size, names, out_);
      return out;
7850 | } |
7851 | } else { |
7852 | at::Tensor tmp_output; |
7853 | { |
7854 | at::AutoDispatchSkipFunctionalize guard; |
7855 | tmp_output = at::_ops::randn_names::call(size, names, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
7856 | } |
7857 | at::functionalization::impl::replace_(out, tmp_output); |
7858 | at::functionalization::impl::commit_update(out); |
7859 | at::functionalization::impl::sync(out); |
7860 | return out; |
7861 | } |
7862 | } |
7863 | |
7864 | at::Tensor & randn_out_generator_with_names_out(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::Tensor & out) { |
7865 | if (false) { |
7866 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today, though, because they all technically support meta tensors.)
7869 | auto out_meta = to_meta(out); |
7870 | at::AutoDispatchSkipFunctionalize func_guard; |
7871 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7872 | at::_ops::randn_generator_with_names_out::call(size, generator, names, out_meta); |
7873 | } |
7874 | |
7875 | at::Tensor out_; |
7876 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7877 | at::functionalization::impl::sync(out); |
7878 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7879 | } else { |
7880 | out_ = out; |
7881 | } |
7882 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7883 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
    } else {
      // case 2: arguments are not functional tensors, so we no-op and redispatch.
      at::AutoDispatchSkipFunctionalize guard;
      at::Tensor tmp_output = at::_ops::randn_generator_with_names_out::call(size, generator, names, out_);
      return out;
7893 | } |
7894 | } else { |
7895 | at::Tensor tmp_output; |
7896 | { |
7897 | at::AutoDispatchSkipFunctionalize guard; |
7898 | tmp_output = at::_ops::randn_generator_with_names::call(size, generator, names, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
7899 | } |
7900 | at::functionalization::impl::replace_(out, tmp_output); |
7901 | at::functionalization::impl::commit_update(out); |
7902 | at::functionalization::impl::sync(out); |
7903 | return out; |
7904 | } |
7905 | } |
7906 | |
7907 | at::Tensor & randn_like_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { |
7908 | if (false) { |
7909 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today, though, because they all technically support meta tensors.)
7912 | auto self_meta = to_meta(self); |
7913 | auto out_meta = to_meta(out); |
7914 | at::AutoDispatchSkipFunctionalize func_guard; |
7915 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7916 | at::_ops::randn_like_out::call(self_meta, memory_format, out_meta); |
7917 | } |
7918 | |
7919 | at::Tensor self_; |
7920 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7921 | at::functionalization::impl::sync(self); |
7922 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7923 | } else { |
7924 | self_ = self; |
7925 | } |
7926 | |
7927 | at::Tensor out_; |
7928 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7929 | at::functionalization::impl::sync(out); |
7930 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7931 | } else { |
7932 | out_ = out; |
7933 | } |
7934 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7935 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
    } else {
      // case 2: arguments are not functional tensors, so we no-op and redispatch.
      at::AutoDispatchSkipFunctionalize guard;
      at::Tensor tmp_output = at::_ops::randn_like_out::call(self_, memory_format, out_);
      return out;
7945 | } |
7946 | } else { |
7947 | at::Tensor tmp_output; |
7948 | { |
7949 | at::AutoDispatchSkipFunctionalize guard; |
7950 | tmp_output = at::_ops::randn_like::call(self_, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt, memory_format); |
7951 | } |
7952 | at::functionalization::impl::replace_(out, tmp_output); |
7953 | at::functionalization::impl::commit_update(out); |
7954 | at::functionalization::impl::sync(out); |
7955 | return out; |
7956 | } |
7957 | } |
7958 | |
7959 | at::Tensor & neg_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
7960 | if (false) { |
7961 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today, though, because they all technically support meta tensors.)
7964 | auto self_meta = to_meta(self); |
7965 | auto out_meta = to_meta(out); |
7966 | at::AutoDispatchSkipFunctionalize func_guard; |
7967 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7968 | at::_ops::neg_out::call(self_meta, out_meta); |
7969 | } |
7970 | |
7971 | at::Tensor self_; |
7972 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7973 | at::functionalization::impl::sync(self); |
7974 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7975 | } else { |
7976 | self_ = self; |
7977 | } |
7978 | |
7979 | at::Tensor out_; |
7980 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7981 | at::functionalization::impl::sync(out); |
7982 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7983 | } else { |
7984 | out_ = out; |
7985 | } |
7986 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7987 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
    } else {
      // case 2: arguments are not functional tensors, so we no-op and redispatch.
      at::AutoDispatchSkipFunctionalize guard;
      at::Tensor tmp_output = at::_ops::neg_out::call(self_, out_);
      return out;
7997 | } |
7998 | } else { |
7999 | at::Tensor tmp_output; |
8000 | { |
8001 | at::AutoDispatchSkipFunctionalize guard; |
8002 | tmp_output = at::_ops::neg::call(self_); |
8003 | } |
8004 | at::functionalization::impl::replace_(out, tmp_output); |
8005 | at::functionalization::impl::commit_update(out); |
8006 | at::functionalization::impl::sync(out); |
8007 | return out; |
8008 | } |
8009 | } |
8010 | |
8011 | at::Tensor & neg_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
8012 | if (true) { |
8013 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today, though, because they all technically support meta tensors.)
8016 | auto self_meta = to_meta(self); |
8017 | at::AutoDispatchSkipFunctionalize func_guard; |
8018 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8019 | at::_ops::neg_::call(self_meta); |
8020 | } |
8021 | |
8022 | at::Tensor self_; |
8023 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8024 | at::functionalization::impl::sync(self); |
8025 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8026 | } else { |
8027 | self_ = self; |
8028 | } |
8029 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
8030 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
    } else {
      // case 2: arguments are not functional tensors, so we no-op and redispatch.
      at::AutoDispatchSkipFunctionalize guard;
      at::Tensor tmp_output = at::_ops::neg_::call(self_);
      return self;
8040 | } |
8041 | } else { |
8042 | at::Tensor tmp_output; |
8043 | { |
8044 | at::AutoDispatchSkipFunctionalize guard; |
8045 | tmp_output = at::_ops::neg::call(self_); |
8046 | } |
8047 | at::functionalization::impl::replace_(self, tmp_output); |
8048 | at::functionalization::impl::commit_update(self); |
8049 | at::functionalization::impl::sync(self); |
8050 | return self; |
8051 | } |
8052 | } |
8053 | |
8054 | at::Tensor & negative_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
8055 | if (false) { |
8056 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today, though, because they all technically support meta tensors.)
8059 | auto self_meta = to_meta(self); |
8060 | auto out_meta = to_meta(out); |
8061 | at::AutoDispatchSkipFunctionalize func_guard; |
8062 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8063 | at::_ops::negative_out::call(self_meta, out_meta); |
8064 | } |
8065 | |
8066 | at::Tensor self_; |
8067 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8068 | at::functionalization::impl::sync(self); |
8069 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8070 | } else { |
8071 | self_ = self; |
8072 | } |
8073 | |
8074 | at::Tensor out_; |
8075 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8076 | at::functionalization::impl::sync(out); |
8077 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8078 | } else { |
8079 | out_ = out; |
8080 | } |
8081 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8082 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
    } else {
      // case 2: arguments are not functional tensors, so we no-op and redispatch.
      at::AutoDispatchSkipFunctionalize guard;
      at::Tensor tmp_output = at::_ops::negative_out::call(self_, out_);
      return out;
8092 | } |
8093 | } else { |
8094 | at::Tensor tmp_output; |
8095 | { |
8096 | at::AutoDispatchSkipFunctionalize guard; |
8097 | tmp_output = at::_ops::negative::call(self_); |
8098 | } |
8099 | at::functionalization::impl::replace_(out, tmp_output); |
8100 | at::functionalization::impl::commit_update(out); |
8101 | at::functionalization::impl::sync(out); |
8102 | return out; |
8103 | } |
8104 | } |
8105 | |
8106 | at::Tensor & negative_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
8107 | if (true) { |
8108 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today, though, because they all technically support meta tensors.)
8111 | auto self_meta = to_meta(self); |
8112 | at::AutoDispatchSkipFunctionalize func_guard; |
8113 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8114 | at::_ops::negative_::call(self_meta); |
8115 | } |
8116 | |
8117 | at::Tensor self_; |
8118 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8119 | at::functionalization::impl::sync(self); |
8120 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8121 | } else { |
8122 | self_ = self; |
8123 | } |
8124 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
8125 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
    } else {
      // case 2: arguments are not functional tensors, so we no-op and redispatch.
      at::AutoDispatchSkipFunctionalize guard;
      at::Tensor tmp_output = at::_ops::negative_::call(self_);
      return self;
8135 | } |
8136 | } else { |
8137 | at::Tensor tmp_output; |
8138 | { |
8139 | at::AutoDispatchSkipFunctionalize guard; |
8140 | tmp_output = at::_ops::negative::call(self_); |
8141 | } |
8142 | at::functionalization::impl::replace_(self, tmp_output); |
8143 | at::functionalization::impl::commit_update(self); |
8144 | at::functionalization::impl::sync(self); |
8145 | return self; |
8146 | } |
8147 | } |
8148 | |
8149 | at::Tensor & repeat_interleave_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & repeats, c10::optional<int64_t> output_size, at::Tensor & out) { |
8150 | if (false) { |
8151 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today, though, because they all technically support meta tensors.)
8154 | auto repeats_meta = to_meta(repeats); |
8155 | auto out_meta = to_meta(out); |
8156 | at::AutoDispatchSkipFunctionalize func_guard; |
8157 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8158 | at::_ops::repeat_interleave_Tensor_out::call(repeats_meta, output_size, out_meta); |
8159 | } |
8160 | |
8161 | at::Tensor repeats_; |
8162 | if (at::functionalization::impl::isFunctionalTensor(repeats)) { |
8163 | at::functionalization::impl::sync(repeats); |
8164 | repeats_ = at::functionalization::impl::from_functional_tensor(repeats); |
8165 | } else { |
8166 | repeats_ = repeats; |
8167 | } |
8168 | |
8169 | at::Tensor out_; |
8170 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8171 | at::functionalization::impl::sync(out); |
8172 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8173 | } else { |
8174 | out_ = out; |
8175 | } |
8176 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8177 | if ((false || at::functionalization::impl::isFunctionalTensor(repeats))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
    } else {
      // case 2: arguments are not functional tensors, so we no-op and redispatch.
      at::AutoDispatchSkipFunctionalize guard;
      at::Tensor tmp_output = at::_ops::repeat_interleave_Tensor_out::call(repeats_, output_size, out_);
      return out;
8187 | } |
8188 | } else { |
8189 | at::Tensor tmp_output; |
8190 | { |
8191 | at::AutoDispatchSkipFunctionalize guard; |
8192 | tmp_output = at::_ops::repeat_interleave_Tensor::call(repeats_, output_size); |
8193 | } |
8194 | at::functionalization::impl::replace_(out, tmp_output); |
8195 | at::functionalization::impl::commit_update(out); |
8196 | at::functionalization::impl::sync(out); |
8197 | return out; |
8198 | } |
8199 | } |
8200 | |
8201 | at::Tensor & gelu_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view approximate, at::Tensor & out) { |
8202 | if (false) { |
8203 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today, though, because they all technically support meta tensors.)
8206 | auto self_meta = to_meta(self); |
8207 | auto out_meta = to_meta(out); |
8208 | at::AutoDispatchSkipFunctionalize func_guard; |
8209 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8210 | at::_ops::gelu_out::call(self_meta, approximate, out_meta); |
8211 | } |
8212 | |
8213 | at::Tensor self_; |
8214 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8215 | at::functionalization::impl::sync(self); |
8216 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8217 | } else { |
8218 | self_ = self; |
8219 | } |
8220 | |
8221 | at::Tensor out_; |
8222 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8223 | at::functionalization::impl::sync(out); |
8224 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8225 | } else { |
8226 | out_ = out; |
8227 | } |
8228 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8229 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
    } else {
      // case 2: arguments are not functional tensors, so we no-op and redispatch.
      at::AutoDispatchSkipFunctionalize guard;
      at::Tensor tmp_output = at::_ops::gelu_out::call(self_, approximate, out_);
      return out;
8239 | } |
8240 | } else { |
8241 | at::Tensor tmp_output; |
8242 | { |
8243 | at::AutoDispatchSkipFunctionalize guard; |
8244 | tmp_output = at::_ops::gelu::call(self_, approximate); |
8245 | } |
8246 | at::functionalization::impl::replace_(out, tmp_output); |
8247 | at::functionalization::impl::commit_update(out); |
8248 | at::functionalization::impl::sync(out); |
8249 | return out; |
8250 | } |
8251 | } |
8252 | |
8253 | at::Tensor & gelu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, c10::string_view approximate) { |
8254 | if (true) { |
8255 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today, though, because they all technically support meta tensors.)
8258 | auto self_meta = to_meta(self); |
8259 | at::AutoDispatchSkipFunctionalize func_guard; |
8260 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8261 | at::_ops::gelu_::call(self_meta, approximate); |
8262 | } |
8263 | |
8264 | at::Tensor self_; |
8265 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8266 | at::functionalization::impl::sync(self); |
8267 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8268 | } else { |
8269 | self_ = self; |
8270 | } |
8271 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
8272 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
    } else {
      // case 2: arguments are not functional tensors, so we no-op and redispatch.
      at::AutoDispatchSkipFunctionalize guard;
      at::Tensor tmp_output = at::_ops::gelu_::call(self_, approximate);
      return self;
8282 | } |
8283 | } else { |
8284 | at::Tensor tmp_output; |
8285 | { |
8286 | at::AutoDispatchSkipFunctionalize guard; |
8287 | tmp_output = at::_ops::gelu::call(self_, approximate); |
8288 | } |
8289 | at::functionalization::impl::replace_(self, tmp_output); |
8290 | at::functionalization::impl::commit_update(self); |
8291 | at::functionalization::impl::sync(self); |
8292 | return self; |
8293 | } |
8294 | } |
8295 | |
8296 | at::Tensor & select_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index, at::Tensor & out) { |
8297 | if (false) { |
8298 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today, though, because they all technically support meta tensors.)
8301 | auto grad_output_meta = to_meta(grad_output); |
8302 | auto out_meta = to_meta(out); |
8303 | at::AutoDispatchSkipFunctionalize func_guard; |
8304 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8305 | at::_ops::select_backward_out::call(grad_output_meta, input_sizes, dim, index, out_meta); |
8306 | } |
8307 | |
8308 | at::Tensor grad_output_; |
8309 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
8310 | at::functionalization::impl::sync(grad_output); |
8311 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
8312 | } else { |
8313 | grad_output_ = grad_output; |
8314 | } |
8315 | |
8316 | at::Tensor out_; |
8317 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8318 | at::functionalization::impl::sync(out); |
8319 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8320 | } else { |
8321 | out_ = out; |
8322 | } |
8323 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8324 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
    } else {
      // case 2: arguments are not functional tensors, so we no-op and redispatch.
      at::AutoDispatchSkipFunctionalize guard;
      at::Tensor tmp_output = at::_ops::select_backward_out::call(grad_output_, input_sizes, dim, index, out_);
      return out;
8334 | } |
8335 | } else { |
8336 | at::Tensor tmp_output; |
8337 | { |
8338 | at::AutoDispatchSkipFunctionalize guard; |
8339 | tmp_output = at::_ops::select_backward::call(grad_output_, input_sizes, dim, index); |
8340 | } |
8341 | at::functionalization::impl::replace_(out, tmp_output); |
8342 | at::functionalization::impl::commit_update(out); |
8343 | at::functionalization::impl::sync(out); |
8344 | return out; |
8345 | } |
8346 | } |
8347 | |
8348 | at::Tensor & mish_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
8349 | if (false) { |
8350 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today, though, because they all technically support meta tensors.)
8353 | auto self_meta = to_meta(self); |
8354 | auto out_meta = to_meta(out); |
8355 | at::AutoDispatchSkipFunctionalize func_guard; |
8356 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8357 | at::_ops::mish_out::call(self_meta, out_meta); |
8358 | } |
8359 | |
8360 | at::Tensor self_; |
8361 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8362 | at::functionalization::impl::sync(self); |
8363 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8364 | } else { |
8365 | self_ = self; |
8366 | } |
8367 | |
8368 | at::Tensor out_; |
8369 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8370 | at::functionalization::impl::sync(out); |
8371 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8372 | } else { |
8373 | out_ = out; |
8374 | } |
8375 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8376 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
    } else {
      // case 2: arguments are not functional tensors, so we no-op and redispatch.
      at::AutoDispatchSkipFunctionalize guard;
      at::Tensor tmp_output = at::_ops::mish_out::call(self_, out_);
      return out;
8386 | } |
8387 | } else { |
8388 | at::Tensor tmp_output; |
8389 | { |
8390 | at::AutoDispatchSkipFunctionalize guard; |
8391 | tmp_output = at::_ops::mish::call(self_); |
8392 | } |
8393 | at::functionalization::impl::replace_(out, tmp_output); |
8394 | at::functionalization::impl::commit_update(out); |
8395 | at::functionalization::impl::sync(out); |
8396 | return out; |
8397 | } |
8398 | } |
8399 | |
8400 | at::Tensor & mish_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
8401 | if (true) { |
8402 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today, though, because they all technically support meta tensors.)
8405 | auto self_meta = to_meta(self); |
8406 | at::AutoDispatchSkipFunctionalize func_guard; |
8407 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8408 | at::_ops::mish_::call(self_meta); |
8409 | } |
8410 | |
8411 | at::Tensor self_; |
8412 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8413 | at::functionalization::impl::sync(self); |
8414 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8415 | } else { |
8416 | self_ = self; |
8417 | } |
8418 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
8419 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
    } else {
      // case 2: arguments are not functional tensors, so we no-op and redispatch.
      at::AutoDispatchSkipFunctionalize guard;
      at::Tensor tmp_output = at::_ops::mish_::call(self_);
      return self;
8429 | } |
8430 | } else { |
8431 | at::Tensor tmp_output; |
8432 | { |
8433 | at::AutoDispatchSkipFunctionalize guard; |
8434 | tmp_output = at::_ops::mish::call(self_); |
8435 | } |
8436 | at::functionalization::impl::replace_(self, tmp_output); |
8437 | at::functionalization::impl::commit_update(self); |
8438 | at::functionalization::impl::sync(self); |
8439 | return self; |
8440 | } |
8441 | } |
8442 | |
8443 | at::Tensor & slice_scatter_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step, at::Tensor & out) { |
8444 | if (false) { |
8445 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today, though, because they all technically support meta tensors.)
8448 | auto self_meta = to_meta(self); |
8449 | auto src_meta = to_meta(src); |
8450 | auto out_meta = to_meta(out); |
8451 | at::AutoDispatchSkipFunctionalize func_guard; |
8452 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8453 | at::_ops::slice_scatter_out::call(self_meta, src_meta, dim, start, end, step, out_meta); |
8454 | } |
8455 | |
8456 | at::Tensor self_; |
8457 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8458 | at::functionalization::impl::sync(self); |
8459 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8460 | } else { |
8461 | self_ = self; |
8462 | } |
8463 | |
8464 | at::Tensor src_; |
8465 | if (at::functionalization::impl::isFunctionalTensor(src)) { |
8466 | at::functionalization::impl::sync(src); |
8467 | src_ = at::functionalization::impl::from_functional_tensor(src); |
8468 | } else { |
8469 | src_ = src; |
8470 | } |
8471 | |
8472 | at::Tensor out_; |
8473 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8474 | at::functionalization::impl::sync(out); |
8475 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8476 | } else { |
8477 | out_ = out; |
8478 | } |
8479 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8480 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(src))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
    } else {
      // case 2: arguments are not functional tensors, so we no-op and redispatch.
      at::AutoDispatchSkipFunctionalize guard;
      at::Tensor tmp_output = at::_ops::slice_scatter_out::call(self_, src_, dim, start, end, step, out_);
      return out;
8490 | } |
8491 | } else { |
8492 | at::Tensor tmp_output; |
8493 | { |
8494 | at::AutoDispatchSkipFunctionalize guard; |
8495 | tmp_output = at::_ops::slice_scatter::call(self_, src_, dim, start, end, step); |
8496 | } |
8497 | at::functionalization::impl::replace_(out, tmp_output); |
8498 | at::functionalization::impl::commit_update(out); |
8499 | at::functionalization::impl::sync(out); |
8500 | return out; |
8501 | } |
8502 | } |
8503 | |
8504 | at::Tensor & diagonal_scatter_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) { |
8505 | if (false) { |
8506 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today, though, because they all technically support meta tensors.)
8509 | auto self_meta = to_meta(self); |
8510 | auto src_meta = to_meta(src); |
8511 | auto out_meta = to_meta(out); |
8512 | at::AutoDispatchSkipFunctionalize func_guard; |
8513 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8514 | at::_ops::diagonal_scatter_out::call(self_meta, src_meta, offset, dim1, dim2, out_meta); |
8515 | } |
8516 | |
8517 | at::Tensor self_; |
8518 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8519 | at::functionalization::impl::sync(self); |
8520 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8521 | } else { |
8522 | self_ = self; |
8523 | } |
8524 | |
8525 | at::Tensor src_; |
8526 | if (at::functionalization::impl::isFunctionalTensor(src)) { |
8527 | at::functionalization::impl::sync(src); |
8528 | src_ = at::functionalization::impl::from_functional_tensor(src); |
8529 | } else { |
8530 | src_ = src; |
8531 | } |
8532 | |
8533 | at::Tensor out_; |
8534 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8535 | at::functionalization::impl::sync(out); |
8536 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8537 | } else { |
8538 | out_ = out; |
8539 | } |
8540 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8541 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(src))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
    } else {
      // case 2: arguments are not functional tensors, so we no-op and redispatch.
      at::AutoDispatchSkipFunctionalize guard;
      at::Tensor tmp_output = at::_ops::diagonal_scatter_out::call(self_, src_, offset, dim1, dim2, out_);
      return out;
8551 | } |
8552 | } else { |
8553 | at::Tensor tmp_output; |
8554 | { |
8555 | at::AutoDispatchSkipFunctionalize guard; |
8556 | tmp_output = at::_ops::diagonal_scatter::call(self_, src_, offset, dim1, dim2); |
8557 | } |
8558 | at::functionalization::impl::replace_(out, tmp_output); |
8559 | at::functionalization::impl::commit_update(out); |
8560 | at::functionalization::impl::sync(out); |
8561 | return out; |
8562 | } |
8563 | } |
8564 | |
8565 | at::Tensor & _softmax_backward_data_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & grad_input) { |
8566 | if (false) { |
8567 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today, though, because they all technically support meta tensors.)
8570 | auto grad_output_meta = to_meta(grad_output); |
8571 | auto output_meta = to_meta(output); |
8572 | auto grad_input_meta = to_meta(grad_input); |
8573 | at::AutoDispatchSkipFunctionalize func_guard; |
8574 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8575 | at::_ops::_softmax_backward_data_out::call(grad_output_meta, output_meta, dim, input_dtype, grad_input_meta); |
8576 | } |
8577 | |
8578 | at::Tensor grad_output_; |
8579 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
8580 | at::functionalization::impl::sync(grad_output); |
8581 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
8582 | } else { |
8583 | grad_output_ = grad_output; |
8584 | } |
8585 | |
8586 | at::Tensor output_; |
8587 | if (at::functionalization::impl::isFunctionalTensor(output)) { |
8588 | at::functionalization::impl::sync(output); |
8589 | output_ = at::functionalization::impl::from_functional_tensor(output); |
8590 | } else { |
8591 | output_ = output; |
8592 | } |
8593 | |
8594 | at::Tensor grad_input_; |
8595 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
8596 | at::functionalization::impl::sync(grad_input); |
8597 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
8598 | } else { |
8599 | grad_input_ = grad_input; |
8600 | } |
8601 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
8602 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(output))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
    } else {
      // case 2: arguments are not functional tensors, so we no-op and redispatch.
      at::AutoDispatchSkipFunctionalize guard;
      at::Tensor tmp_output = at::_ops::_softmax_backward_data_out::call(grad_output_, output_, dim, input_dtype, grad_input_);
      return grad_input;
8612 | } |
8613 | } else { |
8614 | at::Tensor tmp_output; |
8615 | { |
8616 | at::AutoDispatchSkipFunctionalize guard; |
8617 | tmp_output = at::_ops::_softmax_backward_data::call(grad_output_, output_, dim, input_dtype); |
8618 | } |
8619 | at::functionalization::impl::replace_(grad_input, tmp_output); |
8620 | at::functionalization::impl::commit_update(grad_input); |
8621 | at::functionalization::impl::sync(grad_input); |
8622 | return grad_input; |
8623 | } |
8624 | } |
8625 | |
8626 | void unsafe_split_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) { |
8627 | if (false) { |
8628 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today, though, because they all technically support meta tensors.)
8631 | auto self_meta = to_meta(self); |
8632 | auto out_meta = to_meta(out); |
8633 | at::AutoDispatchSkipFunctionalize func_guard; |
8634 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8635 | at::_ops::unsafe_split_Tensor_out::call(self_meta, split_size, dim, out_meta); |
8636 | } |
8637 | |
8638 | at::Tensor self_; |
8639 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8640 | at::functionalization::impl::sync(self); |
8641 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8642 | } else { |
8643 | self_ = self; |
8644 | } |
8645 | |
8646 | ::std::vector<at::Tensor> out_; |
8647 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8648 | at::functionalization::impl::sync(out); |
8649 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8650 | } else { |
8651 | out_ = out.vec(); |
8652 | } |
8653 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8654 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
    } else {
      // case 2: arguments are not functional tensors, so we no-op and redispatch.
      at::AutoDispatchSkipFunctionalize guard;
      at::_ops::unsafe_split_Tensor_out::call(self_, split_size, dim, out_);
8664 | } |
8665 | } else { |
8666 | ::std::vector<at::Tensor> tmp_output; |
8667 | { |
8668 | at::AutoDispatchSkipFunctionalize guard; |
8669 | tmp_output = at::_ops::unsafe_split_Tensor::call(self_, split_size, dim); |
8670 | } |
8671 | at::functionalization::impl::replace_(out, tmp_output); |
8672 | at::functionalization::impl::commit_update(out); |
8673 | at::functionalization::impl::sync(out); |
8674 | |
8675 | } |
8676 | } |
8677 | |
8678 | void unsafe_split_with_sizes_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) { |
8679 | if (false) { |
8680 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today, though, because they all technically support meta tensors.)
8683 | auto self_meta = to_meta(self); |
8684 | auto out_meta = to_meta(out); |
8685 | at::AutoDispatchSkipFunctionalize func_guard; |
8686 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8687 | at::_ops::unsafe_split_with_sizes_out::call(self_meta, split_sizes, dim, out_meta); |
8688 | } |
8689 | |
8690 | at::Tensor self_; |
8691 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8692 | at::functionalization::impl::sync(self); |
8693 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8694 | } else { |
8695 | self_ = self; |
8696 | } |
8697 | |
8698 | ::std::vector<at::Tensor> out_; |
8699 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8700 | at::functionalization::impl::sync(out); |
8701 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8702 | } else { |
8703 | out_ = out.vec(); |
8704 | } |
8705 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8706 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
    } else {
      // case 2: arguments are not functional tensors, so we no-op and redispatch.
      at::AutoDispatchSkipFunctionalize guard;
      at::_ops::unsafe_split_with_sizes_out::call(self_, split_sizes, dim, out_);
8716 | } |
8717 | } else { |
8718 | ::std::vector<at::Tensor> tmp_output; |
8719 | { |
8720 | at::AutoDispatchSkipFunctionalize guard; |
8721 | tmp_output = at::_ops::unsafe_split_with_sizes::call(self_, split_sizes, dim); |
8722 | } |
8723 | at::functionalization::impl::replace_(out, tmp_output); |
8724 | at::functionalization::impl::commit_update(out); |
8725 | at::functionalization::impl::sync(out); |
8726 | |
8727 | } |
8728 | } |
8729 | |
8730 | at::Tensor & square_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
8731 | if (false) { |
8732 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today, though, because they all technically support meta tensors.)
8735 | auto self_meta = to_meta(self); |
8736 | auto out_meta = to_meta(out); |
8737 | at::AutoDispatchSkipFunctionalize func_guard; |
8738 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8739 | at::_ops::square_out::call(self_meta, out_meta); |
8740 | } |
8741 | |
8742 | at::Tensor self_; |
8743 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8744 | at::functionalization::impl::sync(self); |
8745 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8746 | } else { |
8747 | self_ = self; |
8748 | } |
8749 | |
8750 | at::Tensor out_; |
8751 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8752 | at::functionalization::impl::sync(out); |
8753 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8754 | } else { |
8755 | out_ = out; |
8756 | } |
8757 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8758 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
    } else {
      // case 2: arguments are not functional tensors, so we no-op and redispatch.
      at::AutoDispatchSkipFunctionalize guard;
      at::Tensor tmp_output = at::_ops::square_out::call(self_, out_);
      return out;
8768 | } |
8769 | } else { |
8770 | at::Tensor tmp_output; |
8771 | { |
8772 | at::AutoDispatchSkipFunctionalize guard; |
8773 | tmp_output = at::_ops::square::call(self_); |
8774 | } |
8775 | at::functionalization::impl::replace_(out, tmp_output); |
8776 | at::functionalization::impl::commit_update(out); |
8777 | at::functionalization::impl::sync(out); |
8778 | return out; |
8779 | } |
8780 | } |
8781 | |
8782 | at::Tensor & square_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
8783 | if (true) { |
8784 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today, though, because they all technically support meta tensors.)
8787 | auto self_meta = to_meta(self); |
8788 | at::AutoDispatchSkipFunctionalize func_guard; |
8789 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8790 | at::_ops::square_::call(self_meta); |
8791 | } |
8792 | |
8793 | at::Tensor self_; |
8794 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8795 | at::functionalization::impl::sync(self); |
8796 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8797 | } else { |
8798 | self_ = self; |
8799 | } |
8800 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
8801 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
    } else {
      // case 2: arguments are not functional tensors, so we no-op and redispatch.
      at::AutoDispatchSkipFunctionalize guard;
      at::Tensor tmp_output = at::_ops::square_::call(self_);
      return self;
8811 | } |
8812 | } else { |
8813 | at::Tensor tmp_output; |
8814 | { |
8815 | at::AutoDispatchSkipFunctionalize guard; |
8816 | tmp_output = at::_ops::square::call(self_); |
8817 | } |
8818 | at::functionalization::impl::replace_(self, tmp_output); |
8819 | at::functionalization::impl::commit_update(self); |
8820 | at::functionalization::impl::sync(self); |
8821 | return self; |
8822 | } |
8823 | } |
8824 | |
8825 | at::Tensor & tanh_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
8826 | if (false) { |
8827 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today, though, because they all technically support meta tensors.)
8830 | auto self_meta = to_meta(self); |
8831 | auto out_meta = to_meta(out); |
8832 | at::AutoDispatchSkipFunctionalize func_guard; |
8833 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8834 | at::_ops::tanh_out::call(self_meta, out_meta); |
8835 | } |
8836 | |
8837 | at::Tensor self_; |
8838 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8839 | at::functionalization::impl::sync(self); |
8840 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8841 | } else { |
8842 | self_ = self; |
8843 | } |
8844 | |
8845 | at::Tensor out_; |
8846 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8847 | at::functionalization::impl::sync(out); |
8848 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8849 | } else { |
8850 | out_ = out; |
8851 | } |
8852 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8853 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
    } else {
      // case 2: arguments are not functional tensors, so we no-op and redispatch.
      at::AutoDispatchSkipFunctionalize guard;
      at::Tensor tmp_output = at::_ops::tanh_out::call(self_, out_);
      return out;
8863 | } |
8864 | } else { |
8865 | at::Tensor tmp_output; |
8866 | { |
8867 | at::AutoDispatchSkipFunctionalize guard; |
8868 | tmp_output = at::_ops::tanh::call(self_); |
8869 | } |
8870 | at::functionalization::impl::replace_(out, tmp_output); |
8871 | at::functionalization::impl::commit_update(out); |
8872 | at::functionalization::impl::sync(out); |
8873 | return out; |
8874 | } |
8875 | } |
8876 | |
8877 | at::Tensor & tanh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
8878 | if (true) { |
8879 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today, though, because they all technically support meta tensors.)
8882 | auto self_meta = to_meta(self); |
8883 | at::AutoDispatchSkipFunctionalize func_guard; |
8884 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8885 | at::_ops::tanh_::call(self_meta); |
8886 | } |
8887 | |
8888 | at::Tensor self_; |
8889 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8890 | at::functionalization::impl::sync(self); |
8891 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8892 | } else { |
8893 | self_ = self; |
8894 | } |
8895 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
8896 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
    } else {
      // case 2: arguments are not functional tensors, so we no-op and redispatch.
      at::AutoDispatchSkipFunctionalize guard;
      at::Tensor tmp_output = at::_ops::tanh_::call(self_);
      return self;
8906 | } |
8907 | } else { |
8908 | at::Tensor tmp_output; |
8909 | { |
8910 | at::AutoDispatchSkipFunctionalize guard; |
8911 | tmp_output = at::_ops::tanh::call(self_); |
8912 | } |
8913 | at::functionalization::impl::replace_(self, tmp_output); |
8914 | at::functionalization::impl::commit_update(self); |
8915 | at::functionalization::impl::sync(self); |
8916 | return self; |
8917 | } |
8918 | } |
8919 | |
8920 | at::Tensor & roll_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef shifts, at::IntArrayRef dims, at::Tensor & out) { |
8921 | if (false) { |
8922 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today, though, because they all technically support meta tensors.)
8925 | auto self_meta = to_meta(self); |
8926 | auto out_meta = to_meta(out); |
8927 | at::AutoDispatchSkipFunctionalize func_guard; |
8928 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8929 | at::_ops::roll_out::call(self_meta, shifts, dims, out_meta); |
8930 | } |
8931 | |
8932 | at::Tensor self_; |
8933 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8934 | at::functionalization::impl::sync(self); |
8935 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8936 | } else { |
8937 | self_ = self; |
8938 | } |
8939 | |
8940 | at::Tensor out_; |
8941 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8942 | at::functionalization::impl::sync(out); |
8943 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8944 | } else { |
8945 | out_ = out; |
8946 | } |
8947 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8948 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
8949 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8950 | TORCH_INTERNAL_ASSERT(false, |
8951 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8952 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8953 | } else { |
8954 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8955 | at::AutoDispatchSkipFunctionalize guard; |
8956 | at::Tensor tmp_output = at::_ops::roll_out::call(self_, shifts, dims, out_); |
return out;
8958 | } |
8959 | } else { |
8960 | at::Tensor tmp_output; |
8961 | { |
8962 | at::AutoDispatchSkipFunctionalize guard; |
8963 | tmp_output = at::_ops::roll::call(self_, shifts, dims); |
8964 | } |
8965 | at::functionalization::impl::replace_(out, tmp_output); |
8966 | at::functionalization::impl::commit_update(out); |
8967 | at::functionalization::impl::sync(out); |
8968 | return out; |
8969 | } |
8970 | } |
8971 | |
8972 | at::Tensor & rot90_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, at::IntArrayRef dims, at::Tensor & out) { |
8973 | if (false) { |
8974 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8975 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
8977 | auto self_meta = to_meta(self); |
8978 | auto out_meta = to_meta(out); |
8979 | at::AutoDispatchSkipFunctionalize func_guard; |
8980 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8981 | at::_ops::rot90_out::call(self_meta, k, dims, out_meta); |
8982 | } |
8983 | |
8984 | at::Tensor self_; |
8985 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8986 | at::functionalization::impl::sync(self); |
8987 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8988 | } else { |
8989 | self_ = self; |
8990 | } |
8991 | |
8992 | at::Tensor out_; |
8993 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8994 | at::functionalization::impl::sync(out); |
8995 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8996 | } else { |
8997 | out_ = out; |
8998 | } |
8999 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9000 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9002 | TORCH_INTERNAL_ASSERT(false, |
9003 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9004 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9005 | } else { |
9006 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9007 | at::AutoDispatchSkipFunctionalize guard; |
9008 | at::Tensor tmp_output = at::_ops::rot90_out::call(self_, k, dims, out_); |
return out;
9010 | } |
9011 | } else { |
9012 | at::Tensor tmp_output; |
9013 | { |
9014 | at::AutoDispatchSkipFunctionalize guard; |
9015 | tmp_output = at::_ops::rot90::call(self_, k, dims); |
9016 | } |
9017 | at::functionalization::impl::replace_(out, tmp_output); |
9018 | at::functionalization::impl::commit_update(out); |
9019 | at::functionalization::impl::sync(out); |
9020 | return out; |
9021 | } |
9022 | } |
9023 | |
9024 | at::Tensor & _trilinear_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim, at::Tensor & out) { |
9025 | if (false) { |
9026 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9027 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
9029 | auto i1_meta = to_meta(i1); |
9030 | auto i2_meta = to_meta(i2); |
9031 | auto i3_meta = to_meta(i3); |
9032 | auto out_meta = to_meta(out); |
9033 | at::AutoDispatchSkipFunctionalize func_guard; |
9034 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9035 | at::_ops::_trilinear_out::call(i1_meta, i2_meta, i3_meta, expand1, expand2, expand3, sumdim, unroll_dim, out_meta); |
9036 | } |
9037 | |
9038 | at::Tensor i1_; |
9039 | if (at::functionalization::impl::isFunctionalTensor(i1)) { |
9040 | at::functionalization::impl::sync(i1); |
9041 | i1_ = at::functionalization::impl::from_functional_tensor(i1); |
9042 | } else { |
9043 | i1_ = i1; |
9044 | } |
9045 | |
9046 | at::Tensor i2_; |
9047 | if (at::functionalization::impl::isFunctionalTensor(i2)) { |
9048 | at::functionalization::impl::sync(i2); |
9049 | i2_ = at::functionalization::impl::from_functional_tensor(i2); |
9050 | } else { |
9051 | i2_ = i2; |
9052 | } |
9053 | |
9054 | at::Tensor i3_; |
9055 | if (at::functionalization::impl::isFunctionalTensor(i3)) { |
9056 | at::functionalization::impl::sync(i3); |
9057 | i3_ = at::functionalization::impl::from_functional_tensor(i3); |
9058 | } else { |
9059 | i3_ = i3; |
9060 | } |
9061 | |
9062 | at::Tensor out_; |
9063 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9064 | at::functionalization::impl::sync(out); |
9065 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9066 | } else { |
9067 | out_ = out; |
9068 | } |
9069 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9070 | if ((false || at::functionalization::impl::isFunctionalTensor(i1) || at::functionalization::impl::isFunctionalTensor(i2) || at::functionalization::impl::isFunctionalTensor(i3))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9072 | TORCH_INTERNAL_ASSERT(false, |
9073 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9074 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9075 | } else { |
9076 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9077 | at::AutoDispatchSkipFunctionalize guard; |
9078 | at::Tensor tmp_output = at::_ops::_trilinear_out::call(i1_, i2_, i3_, expand1, expand2, expand3, sumdim, unroll_dim, out_); |
return out;
9080 | } |
9081 | } else { |
9082 | at::Tensor tmp_output; |
9083 | { |
9084 | at::AutoDispatchSkipFunctionalize guard; |
9085 | tmp_output = at::_ops::_trilinear::call(i1_, i2_, i3_, expand1, expand2, expand3, sumdim, unroll_dim); |
9086 | } |
9087 | at::functionalization::impl::replace_(out, tmp_output); |
9088 | at::functionalization::impl::commit_update(out); |
9089 | at::functionalization::impl::sync(out); |
9090 | return out; |
9091 | } |
9092 | } |
9093 | |
9094 | ::std::tuple<at::Tensor &,at::Tensor &> _unique_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool sorted, bool return_inverse, at::Tensor & out0, at::Tensor & out1) { |
9095 | if (false) { |
9096 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9097 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
9099 | auto self_meta = to_meta(self); |
9100 | auto out0_meta = to_meta(out0); |
9101 | auto out1_meta = to_meta(out1); |
9102 | at::AutoDispatchSkipFunctionalize func_guard; |
9103 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9104 | at::_ops::_unique_out::call(self_meta, sorted, return_inverse, out0_meta, out1_meta); |
9105 | } |
9106 | |
9107 | at::Tensor self_; |
9108 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9109 | at::functionalization::impl::sync(self); |
9110 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9111 | } else { |
9112 | self_ = self; |
9113 | } |
9114 | |
9115 | at::Tensor out0_; |
9116 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
9117 | at::functionalization::impl::sync(out0); |
9118 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
9119 | } else { |
9120 | out0_ = out0; |
9121 | } |
9122 | |
9123 | at::Tensor out1_; |
9124 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
9125 | at::functionalization::impl::sync(out1); |
9126 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
9127 | } else { |
9128 | out1_ = out1; |
9129 | } |
9130 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) { |
9131 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9133 | TORCH_INTERNAL_ASSERT(false, |
9134 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9135 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9136 | } else { |
9137 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9138 | at::AutoDispatchSkipFunctionalize guard; |
9139 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_unique_out::call(self_, sorted, return_inverse, out0_, out1_); |
return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
9141 | } |
9142 | } else { |
9143 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
9144 | { |
9145 | at::AutoDispatchSkipFunctionalize guard; |
9146 | tmp_output = at::_ops::_unique::call(self_, sorted, return_inverse); |
9147 | } |
9148 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
9149 | at::functionalization::impl::commit_update(out0); |
9150 | at::functionalization::impl::sync(out0); |
9151 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
9152 | at::functionalization::impl::commit_update(out1); |
9153 | at::functionalization::impl::sync(out1); |
9154 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
9155 | } |
9156 | } |
9157 | |
9158 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _unique2_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
9159 | if (false) { |
9160 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9161 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
9163 | auto self_meta = to_meta(self); |
9164 | auto out0_meta = to_meta(out0); |
9165 | auto out1_meta = to_meta(out1); |
9166 | auto out2_meta = to_meta(out2); |
9167 | at::AutoDispatchSkipFunctionalize func_guard; |
9168 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9169 | at::_ops::_unique2_out::call(self_meta, sorted, return_inverse, return_counts, out0_meta, out1_meta, out2_meta); |
9170 | } |
9171 | |
9172 | at::Tensor self_; |
9173 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9174 | at::functionalization::impl::sync(self); |
9175 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9176 | } else { |
9177 | self_ = self; |
9178 | } |
9179 | |
9180 | at::Tensor out0_; |
9181 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
9182 | at::functionalization::impl::sync(out0); |
9183 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
9184 | } else { |
9185 | out0_ = out0; |
9186 | } |
9187 | |
9188 | at::Tensor out1_; |
9189 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
9190 | at::functionalization::impl::sync(out1); |
9191 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
9192 | } else { |
9193 | out1_ = out1; |
9194 | } |
9195 | |
9196 | at::Tensor out2_; |
9197 | if (at::functionalization::impl::isFunctionalTensor(out2)) { |
9198 | at::functionalization::impl::sync(out2); |
9199 | out2_ = at::functionalization::impl::from_functional_tensor(out2); |
9200 | } else { |
9201 | out2_ = out2; |
9202 | } |
9203 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) { |
9204 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9206 | TORCH_INTERNAL_ASSERT(false, |
9207 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9208 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9209 | } else { |
9210 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9211 | at::AutoDispatchSkipFunctionalize guard; |
9212 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_unique2_out::call(self_, sorted, return_inverse, return_counts, out0_, out1_, out2_); |
return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
9214 | } |
9215 | } else { |
9216 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output; |
9217 | { |
9218 | at::AutoDispatchSkipFunctionalize guard; |
9219 | tmp_output = at::_ops::_unique2::call(self_, sorted, return_inverse, return_counts); |
9220 | } |
9221 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
9222 | at::functionalization::impl::commit_update(out0); |
9223 | at::functionalization::impl::sync(out0); |
9224 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
9225 | at::functionalization::impl::commit_update(out1); |
9226 | at::functionalization::impl::sync(out1); |
9227 | at::functionalization::impl::replace_(out2, std::get<2>(tmp_output)); |
9228 | at::functionalization::impl::commit_update(out2); |
9229 | at::functionalization::impl::sync(out2); |
9230 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
9231 | } |
9232 | } |
9233 | |
9234 | ::std::tuple<at::Tensor &,at::Tensor &> _weight_norm_interface_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & v, const at::Tensor & g, int64_t dim, at::Tensor & out0, at::Tensor & out1) { |
9235 | if (false) { |
9236 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9237 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
9239 | auto v_meta = to_meta(v); |
9240 | auto g_meta = to_meta(g); |
9241 | auto out0_meta = to_meta(out0); |
9242 | auto out1_meta = to_meta(out1); |
9243 | at::AutoDispatchSkipFunctionalize func_guard; |
9244 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9245 | at::_ops::_weight_norm_interface_out::call(v_meta, g_meta, dim, out0_meta, out1_meta); |
9246 | } |
9247 | |
9248 | at::Tensor v_; |
9249 | if (at::functionalization::impl::isFunctionalTensor(v)) { |
9250 | at::functionalization::impl::sync(v); |
9251 | v_ = at::functionalization::impl::from_functional_tensor(v); |
9252 | } else { |
9253 | v_ = v; |
9254 | } |
9255 | |
9256 | at::Tensor g_; |
9257 | if (at::functionalization::impl::isFunctionalTensor(g)) { |
9258 | at::functionalization::impl::sync(g); |
9259 | g_ = at::functionalization::impl::from_functional_tensor(g); |
9260 | } else { |
9261 | g_ = g; |
9262 | } |
9263 | |
9264 | at::Tensor out0_; |
9265 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
9266 | at::functionalization::impl::sync(out0); |
9267 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
9268 | } else { |
9269 | out0_ = out0; |
9270 | } |
9271 | |
9272 | at::Tensor out1_; |
9273 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
9274 | at::functionalization::impl::sync(out1); |
9275 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
9276 | } else { |
9277 | out1_ = out1; |
9278 | } |
9279 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) { |
9280 | if ((false || at::functionalization::impl::isFunctionalTensor(v) || at::functionalization::impl::isFunctionalTensor(g))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9282 | TORCH_INTERNAL_ASSERT(false, |
9283 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9284 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9285 | } else { |
9286 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9287 | at::AutoDispatchSkipFunctionalize guard; |
9288 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_weight_norm_interface_out::call(v_, g_, dim, out0_, out1_); |
return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
9290 | } |
9291 | } else { |
9292 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
9293 | { |
9294 | at::AutoDispatchSkipFunctionalize guard; |
9295 | tmp_output = at::_ops::_weight_norm_interface::call(v_, g_, dim); |
9296 | } |
9297 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
9298 | at::functionalization::impl::commit_update(out0); |
9299 | at::functionalization::impl::sync(out0); |
9300 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
9301 | at::functionalization::impl::commit_update(out1); |
9302 | at::functionalization::impl::sync(out1); |
9303 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
9304 | } |
9305 | } |
9306 | |
9307 | at::Tensor & _efficientzerotensor_out_out(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::Tensor & out) { |
9308 | if (false) { |
9309 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9310 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
9312 | auto out_meta = to_meta(out); |
9313 | at::AutoDispatchSkipFunctionalize func_guard; |
9314 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9315 | at::_ops::_efficientzerotensor_out::call(size, out_meta); |
9316 | } |
9317 | |
9318 | at::Tensor out_; |
9319 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9320 | at::functionalization::impl::sync(out); |
9321 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9322 | } else { |
9323 | out_ = out; |
9324 | } |
9325 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9326 | if ((false)) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9328 | TORCH_INTERNAL_ASSERT(false, |
9329 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9330 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9331 | } else { |
9332 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9333 | at::AutoDispatchSkipFunctionalize guard; |
9334 | at::Tensor tmp_output = at::_ops::_efficientzerotensor_out::call(size, out_); |
return out;
9336 | } |
9337 | } else { |
9338 | at::Tensor tmp_output; |
9339 | { |
9340 | at::AutoDispatchSkipFunctionalize guard; |
9341 | tmp_output = at::_ops::_efficientzerotensor::call(size, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
9342 | } |
9343 | at::functionalization::impl::replace_(out, tmp_output); |
9344 | at::functionalization::impl::commit_update(out); |
9345 | at::functionalization::impl::sync(out); |
9346 | return out; |
9347 | } |
9348 | } |
9349 | |
9350 | at::Tensor & _standard_gamma_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) { |
9351 | if (false) { |
9352 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9353 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
9355 | auto self_meta = to_meta(self); |
9356 | auto out_meta = to_meta(out); |
9357 | at::AutoDispatchSkipFunctionalize func_guard; |
9358 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9359 | at::_ops::_standard_gamma_out::call(self_meta, generator, out_meta); |
9360 | } |
9361 | |
9362 | at::Tensor self_; |
9363 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9364 | at::functionalization::impl::sync(self); |
9365 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9366 | } else { |
9367 | self_ = self; |
9368 | } |
9369 | |
9370 | at::Tensor out_; |
9371 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9372 | at::functionalization::impl::sync(out); |
9373 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9374 | } else { |
9375 | out_ = out; |
9376 | } |
9377 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9378 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9380 | TORCH_INTERNAL_ASSERT(false, |
9381 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9382 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9383 | } else { |
9384 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9385 | at::AutoDispatchSkipFunctionalize guard; |
9386 | at::Tensor tmp_output = at::_ops::_standard_gamma_out::call(self_, generator, out_); |
return out;
9388 | } |
9389 | } else { |
9390 | at::Tensor tmp_output; |
9391 | { |
9392 | at::AutoDispatchSkipFunctionalize guard; |
9393 | tmp_output = at::_ops::_standard_gamma::call(self_, generator); |
9394 | } |
9395 | at::functionalization::impl::replace_(out, tmp_output); |
9396 | at::functionalization::impl::commit_update(out); |
9397 | at::functionalization::impl::sync(out); |
9398 | return out; |
9399 | } |
9400 | } |
9401 | |
9402 | at::Tensor & _dirichlet_grad_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total, at::Tensor & out) { |
9403 | if (false) { |
9404 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9405 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
9407 | auto x_meta = to_meta(x); |
9408 | auto alpha_meta = to_meta(alpha); |
9409 | auto total_meta = to_meta(total); |
9410 | auto out_meta = to_meta(out); |
9411 | at::AutoDispatchSkipFunctionalize func_guard; |
9412 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9413 | at::_ops::_dirichlet_grad_out::call(x_meta, alpha_meta, total_meta, out_meta); |
9414 | } |
9415 | |
9416 | at::Tensor x_; |
9417 | if (at::functionalization::impl::isFunctionalTensor(x)) { |
9418 | at::functionalization::impl::sync(x); |
9419 | x_ = at::functionalization::impl::from_functional_tensor(x); |
9420 | } else { |
9421 | x_ = x; |
9422 | } |
9423 | |
9424 | at::Tensor alpha_; |
9425 | if (at::functionalization::impl::isFunctionalTensor(alpha)) { |
9426 | at::functionalization::impl::sync(alpha); |
9427 | alpha_ = at::functionalization::impl::from_functional_tensor(alpha); |
9428 | } else { |
9429 | alpha_ = alpha; |
9430 | } |
9431 | |
9432 | at::Tensor total_; |
9433 | if (at::functionalization::impl::isFunctionalTensor(total)) { |
9434 | at::functionalization::impl::sync(total); |
9435 | total_ = at::functionalization::impl::from_functional_tensor(total); |
9436 | } else { |
9437 | total_ = total; |
9438 | } |
9439 | |
9440 | at::Tensor out_; |
9441 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9442 | at::functionalization::impl::sync(out); |
9443 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9444 | } else { |
9445 | out_ = out; |
9446 | } |
9447 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9448 | if ((false || at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(alpha) || at::functionalization::impl::isFunctionalTensor(total))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9450 | TORCH_INTERNAL_ASSERT(false, |
9451 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9452 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9453 | } else { |
9454 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9455 | at::AutoDispatchSkipFunctionalize guard; |
9456 | at::Tensor tmp_output = at::_ops::_dirichlet_grad_out::call(x_, alpha_, total_, out_); |
return out;
9458 | } |
9459 | } else { |
9460 | at::Tensor tmp_output; |
9461 | { |
9462 | at::AutoDispatchSkipFunctionalize guard; |
9463 | tmp_output = at::_ops::_dirichlet_grad::call(x_, alpha_, total_); |
9464 | } |
9465 | at::functionalization::impl::replace_(out, tmp_output); |
9466 | at::functionalization::impl::commit_update(out); |
9467 | at::functionalization::impl::sync(out); |
9468 | return out; |
9469 | } |
9470 | } |
9471 | |
9472 | at::Tensor & norm_out_ScalarOpt_dtype_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::ScalarType dtype, at::Tensor & out) { |
9473 | if (false) { |
9474 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9475 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
9477 | auto self_meta = to_meta(self); |
9478 | auto out_meta = to_meta(out); |
9479 | at::AutoDispatchSkipFunctionalize func_guard; |
9480 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9481 | at::_ops::norm_ScalarOpt_dtype_out::call(self_meta, p, dtype, out_meta); |
9482 | } |
9483 | |
9484 | at::Tensor self_; |
9485 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9486 | at::functionalization::impl::sync(self); |
9487 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9488 | } else { |
9489 | self_ = self; |
9490 | } |
9491 | |
9492 | at::Tensor out_; |
9493 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9494 | at::functionalization::impl::sync(out); |
9495 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9496 | } else { |
9497 | out_ = out; |
9498 | } |
9499 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9500 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9502 | TORCH_INTERNAL_ASSERT(false, |
9503 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9504 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9505 | } else { |
9506 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9507 | at::AutoDispatchSkipFunctionalize guard; |
9508 | at::Tensor tmp_output = at::_ops::norm_ScalarOpt_dtype_out::call(self_, p, dtype, out_); |
return out;
9510 | } |
9511 | } else { |
9512 | at::Tensor tmp_output; |
9513 | { |
9514 | at::AutoDispatchSkipFunctionalize guard; |
9515 | tmp_output = at::_ops::norm_ScalarOpt_dtype::call(self_, p, dtype); |
9516 | } |
9517 | at::functionalization::impl::replace_(out, tmp_output); |
9518 | at::functionalization::impl::commit_update(out); |
9519 | at::functionalization::impl::sync(out); |
9520 | return out; |
9521 | } |
9522 | } |
9523 | |
9524 | at::Tensor & norm_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p, at::Tensor & out) { |
9525 | if (false) { |
9526 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9527 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
9529 | auto self_meta = to_meta(self); |
9530 | auto out_meta = to_meta(out); |
9531 | at::AutoDispatchSkipFunctionalize func_guard; |
9532 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9533 | at::_ops::norm_Scalar_out::call(self_meta, p, out_meta); |
9534 | } |
9535 | |
9536 | at::Tensor self_; |
9537 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9538 | at::functionalization::impl::sync(self); |
9539 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9540 | } else { |
9541 | self_ = self; |
9542 | } |
9543 | |
9544 | at::Tensor out_; |
9545 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9546 | at::functionalization::impl::sync(out); |
9547 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9548 | } else { |
9549 | out_ = out; |
9550 | } |
9551 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9552 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9554 | TORCH_INTERNAL_ASSERT(false, |
9555 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9556 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9557 | } else { |
9558 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9559 | at::AutoDispatchSkipFunctionalize guard; |
9560 | at::Tensor tmp_output = at::_ops::norm_Scalar_out::call(self_, p, out_); |
return out;
9562 | } |
9563 | } else { |
9564 | at::Tensor tmp_output; |
9565 | { |
9566 | at::AutoDispatchSkipFunctionalize guard; |
9567 | tmp_output = at::_ops::norm_Scalar::call(self_, p); |
9568 | } |
9569 | at::functionalization::impl::replace_(out, tmp_output); |
9570 | at::functionalization::impl::commit_update(out); |
9571 | at::functionalization::impl::sync(out); |
9572 | return out; |
9573 | } |
9574 | } |
9575 | |
9576 | at::Tensor & norm_out_dtype_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype, at::Tensor & out) { |
9577 | if (false) { |
9578 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9579 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
9581 | auto self_meta = to_meta(self); |
9582 | auto out_meta = to_meta(out); |
9583 | at::AutoDispatchSkipFunctionalize func_guard; |
9584 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9585 | at::_ops::norm_dtype_out::call(self_meta, p, dim, keepdim, dtype, out_meta); |
9586 | } |
9587 | |
9588 | at::Tensor self_; |
9589 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9590 | at::functionalization::impl::sync(self); |
9591 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9592 | } else { |
9593 | self_ = self; |
9594 | } |
9595 | |
9596 | at::Tensor out_; |
9597 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9598 | at::functionalization::impl::sync(out); |
9599 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9600 | } else { |
9601 | out_ = out; |
9602 | } |
9603 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9604 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9606 | TORCH_INTERNAL_ASSERT(false, |
9607 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9608 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9609 | } else { |
9610 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9611 | at::AutoDispatchSkipFunctionalize guard; |
9612 | at::Tensor tmp_output = at::_ops::norm_dtype_out::call(self_, p, dim, keepdim, dtype, out_); |
return out;
9614 | } |
9615 | } else { |
9616 | at::Tensor tmp_output; |
9617 | { |
9618 | at::AutoDispatchSkipFunctionalize guard; |
9619 | tmp_output = at::_ops::norm_ScalarOpt_dim_dtype::call(self_, p, dim, keepdim, dtype); |
9620 | } |
9621 | at::functionalization::impl::replace_(out, tmp_output); |
9622 | at::functionalization::impl::commit_update(out); |
9623 | at::functionalization::impl::sync(out); |
9624 | return out; |
9625 | } |
9626 | } |
9627 | |
9628 | at::Tensor & norm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::Tensor & out) { |
9629 | if (false) { |
9630 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9631 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
9633 | auto self_meta = to_meta(self); |
9634 | auto out_meta = to_meta(out); |
9635 | at::AutoDispatchSkipFunctionalize func_guard; |
9636 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9637 | at::_ops::norm_out::call(self_meta, p, dim, keepdim, out_meta); |
9638 | } |
9639 | |
9640 | at::Tensor self_; |
9641 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9642 | at::functionalization::impl::sync(self); |
9643 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9644 | } else { |
9645 | self_ = self; |
9646 | } |
9647 | |
9648 | at::Tensor out_; |
9649 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9650 | at::functionalization::impl::sync(out); |
9651 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9652 | } else { |
9653 | out_ = out; |
9654 | } |
9655 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9656 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9658 | TORCH_INTERNAL_ASSERT(false, |
9659 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9660 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9661 | } else { |
9662 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9663 | at::AutoDispatchSkipFunctionalize guard; |
9664 | at::Tensor tmp_output = at::_ops::norm_out::call(self_, p, dim, keepdim, out_); |
return out;
9666 | } |
9667 | } else { |
9668 | at::Tensor tmp_output; |
9669 | { |
9670 | at::AutoDispatchSkipFunctionalize guard; |
9671 | tmp_output = at::_ops::norm_ScalarOpt_dim::call(self_, p, dim, keepdim); |
9672 | } |
9673 | at::functionalization::impl::replace_(out, tmp_output); |
9674 | at::functionalization::impl::commit_update(out); |
9675 | at::functionalization::impl::sync(out); |
9676 | return out; |
9677 | } |
9678 | } |
9679 | |
9680 | at::Tensor & norm_out_names_dtype_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype, at::Tensor & out) { |
9681 | if (false) { |
9682 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9683 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
9685 | auto self_meta = to_meta(self); |
9686 | auto out_meta = to_meta(out); |
9687 | at::AutoDispatchSkipFunctionalize func_guard; |
9688 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9689 | at::_ops::norm_names_dtype_out::call(self_meta, p, dim, keepdim, dtype, out_meta); |
9690 | } |
9691 | |
9692 | at::Tensor self_; |
9693 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9694 | at::functionalization::impl::sync(self); |
9695 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9696 | } else { |
9697 | self_ = self; |
9698 | } |
9699 | |
9700 | at::Tensor out_; |
9701 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9702 | at::functionalization::impl::sync(out); |
9703 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9704 | } else { |
9705 | out_ = out; |
9706 | } |
9707 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9708 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9710 | TORCH_INTERNAL_ASSERT(false, |
9711 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9712 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9713 | } else { |
9714 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9715 | at::AutoDispatchSkipFunctionalize guard; |
9716 | at::Tensor tmp_output = at::_ops::norm_names_dtype_out::call(self_, p, dim, keepdim, dtype, out_); |
return out;
9718 | } |
9719 | } else { |
9720 | at::Tensor tmp_output; |
9721 | { |
9722 | at::AutoDispatchSkipFunctionalize guard; |
9723 | tmp_output = at::_ops::norm_names_ScalarOpt_dim_dtype::call(self_, p, dim, keepdim, dtype); |
9724 | } |
9725 | at::functionalization::impl::replace_(out, tmp_output); |
9726 | at::functionalization::impl::commit_update(out); |
9727 | at::functionalization::impl::sync(out); |
9728 | return out; |
9729 | } |
9730 | } |
9731 | |
9732 | at::Tensor & norm_out_names_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::Tensor & out) { |
9733 | if (false) { |
9734 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9735 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
9737 | auto self_meta = to_meta(self); |
9738 | auto out_meta = to_meta(out); |
9739 | at::AutoDispatchSkipFunctionalize func_guard; |
9740 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9741 | at::_ops::norm_names_out::call(self_meta, p, dim, keepdim, out_meta); |
9742 | } |
9743 | |
9744 | at::Tensor self_; |
9745 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9746 | at::functionalization::impl::sync(self); |
9747 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9748 | } else { |
9749 | self_ = self; |
9750 | } |
9751 | |
9752 | at::Tensor out_; |
9753 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9754 | at::functionalization::impl::sync(out); |
9755 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9756 | } else { |
9757 | out_ = out; |
9758 | } |
9759 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9760 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9762 | TORCH_INTERNAL_ASSERT(false, |
9763 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9764 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9765 | } else { |
9766 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9767 | at::AutoDispatchSkipFunctionalize guard; |
9768 | at::Tensor tmp_output = at::_ops::norm_names_out::call(self_, p, dim, keepdim, out_); |
return out;
9770 | } |
9771 | } else { |
9772 | at::Tensor tmp_output; |
9773 | { |
9774 | at::AutoDispatchSkipFunctionalize guard; |
9775 | tmp_output = at::_ops::norm_names_ScalarOpt_dim::call(self_, p, dim, keepdim); |
9776 | } |
9777 | at::functionalization::impl::replace_(out, tmp_output); |
9778 | at::functionalization::impl::commit_update(out); |
9779 | at::functionalization::impl::sync(out); |
9780 | return out; |
9781 | } |
9782 | } |
9783 | |
9784 | ::std::tuple<at::Tensor &,at::Tensor &> frexp_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & mantissa, at::Tensor & exponent) { |
9785 | if (false) { |
9786 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9787 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
9789 | auto self_meta = to_meta(self); |
9790 | auto mantissa_meta = to_meta(mantissa); |
9791 | auto exponent_meta = to_meta(exponent); |
9792 | at::AutoDispatchSkipFunctionalize func_guard; |
9793 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9794 | at::_ops::frexp_Tensor_out::call(self_meta, mantissa_meta, exponent_meta); |
9795 | } |
9796 | |
9797 | at::Tensor self_; |
9798 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9799 | at::functionalization::impl::sync(self); |
9800 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9801 | } else { |
9802 | self_ = self; |
9803 | } |
9804 | |
9805 | at::Tensor mantissa_; |
9806 | if (at::functionalization::impl::isFunctionalTensor(mantissa)) { |
9807 | at::functionalization::impl::sync(mantissa); |
9808 | mantissa_ = at::functionalization::impl::from_functional_tensor(mantissa); |
9809 | } else { |
9810 | mantissa_ = mantissa; |
9811 | } |
9812 | |
9813 | at::Tensor exponent_; |
9814 | if (at::functionalization::impl::isFunctionalTensor(exponent)) { |
9815 | at::functionalization::impl::sync(exponent); |
9816 | exponent_ = at::functionalization::impl::from_functional_tensor(exponent); |
9817 | } else { |
9818 | exponent_ = exponent; |
9819 | } |
9820 | if (!(true && at::functionalization::impl::isFunctionalTensor(mantissa) && at::functionalization::impl::isFunctionalTensor(exponent))) { |
9821 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
9822 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
9823 | TORCH_INTERNAL_ASSERT(false, |
9824 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9825 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9826 | } else { |
9827 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9828 | at::AutoDispatchSkipFunctionalize guard; |
9829 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::frexp_Tensor_out::call(self_, mantissa_, exponent_); |
return ::std::tuple<at::Tensor &,at::Tensor &>(mantissa, exponent);
9831 | } |
9832 | } else { |
9833 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
9834 | { |
9835 | at::AutoDispatchSkipFunctionalize guard; |
9836 | tmp_output = at::_ops::frexp_Tensor::call(self_); |
9837 | } |
9838 | at::functionalization::impl::replace_(mantissa, std::get<0>(tmp_output)); |
9839 | at::functionalization::impl::commit_update(mantissa); |
9840 | at::functionalization::impl::sync(mantissa); |
9841 | at::functionalization::impl::replace_(exponent, std::get<1>(tmp_output)); |
9842 | at::functionalization::impl::commit_update(exponent); |
9843 | at::functionalization::impl::sync(exponent); |
9844 | return ::std::tuple<at::Tensor &,at::Tensor &>(mantissa, exponent); |
9845 | } |
9846 | } |
9847 | |
9848 | at::Tensor & frobenius_norm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) { |
9849 | if (false) { |
9850 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9851 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
9853 | auto self_meta = to_meta(self); |
9854 | auto out_meta = to_meta(out); |
9855 | at::AutoDispatchSkipFunctionalize func_guard; |
9856 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9857 | at::_ops::frobenius_norm_out::call(self_meta, dim, keepdim, out_meta); |
9858 | } |
9859 | |
9860 | at::Tensor self_; |
9861 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9862 | at::functionalization::impl::sync(self); |
9863 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9864 | } else { |
9865 | self_ = self; |
9866 | } |
9867 | |
9868 | at::Tensor out_; |
9869 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9870 | at::functionalization::impl::sync(out); |
9871 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9872 | } else { |
9873 | out_ = out; |
9874 | } |
9875 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9876 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9878 | TORCH_INTERNAL_ASSERT(false, |
9879 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9880 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9881 | } else { |
9882 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9883 | at::AutoDispatchSkipFunctionalize guard; |
9884 | at::Tensor tmp_output = at::_ops::frobenius_norm_out::call(self_, dim, keepdim, out_); |
return out;
9886 | } |
9887 | } else { |
9888 | at::Tensor tmp_output; |
9889 | { |
9890 | at::AutoDispatchSkipFunctionalize guard; |
9891 | tmp_output = at::_ops::frobenius_norm_dim::call(self_, dim, keepdim); |
9892 | } |
9893 | at::functionalization::impl::replace_(out, tmp_output); |
9894 | at::functionalization::impl::commit_update(out); |
9895 | at::functionalization::impl::sync(out); |
9896 | return out; |
9897 | } |
9898 | } |
9899 | |
9900 | at::Tensor & nuclear_norm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool keepdim, at::Tensor & out) { |
9901 | if (false) { |
9902 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9903 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
9905 | auto self_meta = to_meta(self); |
9906 | auto out_meta = to_meta(out); |
9907 | at::AutoDispatchSkipFunctionalize func_guard; |
9908 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9909 | at::_ops::nuclear_norm_out::call(self_meta, keepdim, out_meta); |
9910 | } |
9911 | |
9912 | at::Tensor self_; |
9913 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9914 | at::functionalization::impl::sync(self); |
9915 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9916 | } else { |
9917 | self_ = self; |
9918 | } |
9919 | |
9920 | at::Tensor out_; |
9921 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9922 | at::functionalization::impl::sync(out); |
9923 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9924 | } else { |
9925 | out_ = out; |
9926 | } |
9927 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9928 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9930 | TORCH_INTERNAL_ASSERT(false, |
9931 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9932 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9933 | } else { |
9934 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9935 | at::AutoDispatchSkipFunctionalize guard; |
9936 | at::Tensor tmp_output = at::_ops::nuclear_norm_out::call(self_, keepdim, out_); |
return out;
9938 | } |
9939 | } else { |
9940 | at::Tensor tmp_output; |
9941 | { |
9942 | at::AutoDispatchSkipFunctionalize guard; |
9943 | tmp_output = at::_ops::nuclear_norm::call(self_, keepdim); |
9944 | } |
9945 | at::functionalization::impl::replace_(out, tmp_output); |
9946 | at::functionalization::impl::commit_update(out); |
9947 | at::functionalization::impl::sync(out); |
9948 | return out; |
9949 | } |
9950 | } |
9951 | |
9952 | at::Tensor & nuclear_norm_out_dim_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) { |
9953 | if (false) { |
9954 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9955 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
9957 | auto self_meta = to_meta(self); |
9958 | auto out_meta = to_meta(out); |
9959 | at::AutoDispatchSkipFunctionalize func_guard; |
9960 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9961 | at::_ops::nuclear_norm_dim_out::call(self_meta, dim, keepdim, out_meta); |
9962 | } |
9963 | |
9964 | at::Tensor self_; |
9965 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9966 | at::functionalization::impl::sync(self); |
9967 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9968 | } else { |
9969 | self_ = self; |
9970 | } |
9971 | |
9972 | at::Tensor out_; |
9973 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9974 | at::functionalization::impl::sync(out); |
9975 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9976 | } else { |
9977 | out_ = out; |
9978 | } |
9979 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9980 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9982 | TORCH_INTERNAL_ASSERT(false, |
9983 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9984 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9985 | } else { |
9986 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9987 | at::AutoDispatchSkipFunctionalize guard; |
9988 | at::Tensor tmp_output = at::_ops::nuclear_norm_dim_out::call(self_, dim, keepdim, out_); |
return out;
9990 | } |
9991 | } else { |
9992 | at::Tensor tmp_output; |
9993 | { |
9994 | at::AutoDispatchSkipFunctionalize guard; |
9995 | tmp_output = at::_ops::nuclear_norm_dim::call(self_, dim, keepdim); |
9996 | } |
9997 | at::functionalization::impl::replace_(out, tmp_output); |
9998 | at::functionalization::impl::commit_update(out); |
9999 | at::functionalization::impl::sync(out); |
10000 | return out; |
10001 | } |
10002 | } |
10003 | |
10004 | at::Tensor & subtract_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) { |
10005 | if (false) { |
10006 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10007 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
10009 | auto self_meta = to_meta(self); |
10010 | auto other_meta = to_meta(other); |
10011 | auto out_meta = to_meta(out); |
10012 | at::AutoDispatchSkipFunctionalize func_guard; |
10013 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10014 | at::_ops::subtract_out::call(self_meta, other_meta, alpha, out_meta); |
10015 | } |
10016 | |
10017 | at::Tensor self_; |
10018 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10019 | at::functionalization::impl::sync(self); |
10020 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10021 | } else { |
10022 | self_ = self; |
10023 | } |
10024 | |
10025 | at::Tensor other_; |
10026 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
10027 | at::functionalization::impl::sync(other); |
10028 | other_ = at::functionalization::impl::from_functional_tensor(other); |
10029 | } else { |
10030 | other_ = other; |
10031 | } |
10032 | |
10033 | at::Tensor out_; |
10034 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
10035 | at::functionalization::impl::sync(out); |
10036 | out_ = at::functionalization::impl::from_functional_tensor(out); |
10037 | } else { |
10038 | out_ = out; |
10039 | } |
10040 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
10041 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
10043 | TORCH_INTERNAL_ASSERT(false, |
10044 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10045 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10046 | } else { |
10047 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10048 | at::AutoDispatchSkipFunctionalize guard; |
10049 | at::Tensor tmp_output = at::_ops::subtract_out::call(self_, other_, alpha, out_); |
return out;
10051 | } |
10052 | } else { |
10053 | at::Tensor tmp_output; |
10054 | { |
10055 | at::AutoDispatchSkipFunctionalize guard; |
10056 | tmp_output = at::_ops::subtract_Tensor::call(self_, other_, alpha); |
10057 | } |
10058 | at::functionalization::impl::replace_(out, tmp_output); |
10059 | at::functionalization::impl::commit_update(out); |
10060 | at::functionalization::impl::sync(out); |
10061 | return out; |
10062 | } |
10063 | } |
10064 | |
10065 | at::Tensor & subtract__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { |
10066 | if (true) { |
10067 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10068 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
10070 | auto self_meta = to_meta(self); |
10071 | auto other_meta = to_meta(other); |
10072 | at::AutoDispatchSkipFunctionalize func_guard; |
10073 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10074 | at::_ops::subtract__Tensor::call(self_meta, other_meta, alpha); |
10075 | } |
10076 | |
10077 | at::Tensor self_; |
10078 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10079 | at::functionalization::impl::sync(self); |
10080 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10081 | } else { |
10082 | self_ = self; |
10083 | } |
10084 | |
10085 | at::Tensor other_; |
10086 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
10087 | at::functionalization::impl::sync(other); |
10088 | other_ = at::functionalization::impl::from_functional_tensor(other); |
10089 | } else { |
10090 | other_ = other; |
10091 | } |
10092 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
10093 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
10094 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10095 | TORCH_INTERNAL_ASSERT(false, |
10096 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
10097 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
10098 | } else { |
10099 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10100 | at::AutoDispatchSkipFunctionalize guard; |
10101 | at::Tensor tmp_output = at::_ops::subtract__Tensor::call(self_, other_, alpha); |
10102 | return self; |
10103 | } |
10104 | } else { |
10105 | at::Tensor tmp_output; |
10106 | { |
10107 | at::AutoDispatchSkipFunctionalize guard; |
10108 | tmp_output = at::_ops::subtract_Tensor::call(self_, other_, alpha); |
10109 | } |
10110 | at::functionalization::impl::replace_(self, tmp_output); |
10111 | at::functionalization::impl::commit_update(self); |
10112 | at::functionalization::impl::sync(self); |
10113 | return self; |
10114 | } |
10115 | } |
10116 | |
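      | // A minimal usage sketch (illustrative only, not part of the generated file; it |
      | // assumes a functionalize()-style wrapper as referenced by the error messages): |
      | // |
      | //   at::Tensor f(at::Tensor x) { x.subtract_(1.0); return x; } |
      | //   // Under functionalization, the subtract__Tensor kernel above replaces the |
      | //   // in-place mutation with at::_ops::subtract_Tensor and commits the update, |
      | //   // so downstream consumers observe a mutation-free graph. |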
10117 | at::Tensor & sparse_sampled_addmm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { |
10118 | if (false) { |
10119 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10120 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10121 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10122 | auto self_meta = to_meta(self); |
10123 | auto mat1_meta = to_meta(mat1); |
10124 | auto mat2_meta = to_meta(mat2); |
10125 | auto out_meta = to_meta(out); |
10126 | at::AutoDispatchSkipFunctionalize func_guard; |
10127 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10128 | at::_ops::sparse_sampled_addmm_out::call(self_meta, mat1_meta, mat2_meta, beta, alpha, out_meta); |
10129 | } |
10130 | |
10131 | at::Tensor self_; |
10132 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10133 | at::functionalization::impl::sync(self); |
10134 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10135 | } else { |
10136 | self_ = self; |
10137 | } |
10138 | |
10139 | at::Tensor mat1_; |
10140 | if (at::functionalization::impl::isFunctionalTensor(mat1)) { |
10141 | at::functionalization::impl::sync(mat1); |
10142 | mat1_ = at::functionalization::impl::from_functional_tensor(mat1); |
10143 | } else { |
10144 | mat1_ = mat1; |
10145 | } |
10146 | |
10147 | at::Tensor mat2_; |
10148 | if (at::functionalization::impl::isFunctionalTensor(mat2)) { |
10149 | at::functionalization::impl::sync(mat2); |
10150 | mat2_ = at::functionalization::impl::from_functional_tensor(mat2); |
10151 | } else { |
10152 | mat2_ = mat2; |
10153 | } |
10154 | |
10155 | at::Tensor out_; |
10156 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
10157 | at::functionalization::impl::sync(out); |
10158 | out_ = at::functionalization::impl::from_functional_tensor(out); |
10159 | } else { |
10160 | out_ = out; |
10161 | } |
10162 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
10163 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mat1) || at::functionalization::impl::isFunctionalTensor(mat2))) { |
10164 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10165 | TORCH_INTERNAL_ASSERT(false, |
10166 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
10167 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
10168 | } else { |
10169 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10170 | at::AutoDispatchSkipFunctionalize guard; |
10171 | at::Tensor tmp_output = at::_ops::sparse_sampled_addmm_out::call(self_, mat1_, mat2_, beta, alpha, out_); |
10172 | return out; |
10173 | } |
10174 | } else { |
10175 | at::Tensor tmp_output; |
10176 | { |
10177 | at::AutoDispatchSkipFunctionalize guard; |
10178 | tmp_output = at::_ops::sparse_sampled_addmm::call(self_, mat1_, mat2_, beta, alpha); |
10179 | } |
10180 | at::functionalization::impl::replace_(out, tmp_output); |
10181 | at::functionalization::impl::commit_update(out); |
10182 | at::functionalization::impl::sync(out); |
10183 | return out; |
10184 | } |
10185 | } |
10186 | |
10187 | at::Tensor & _addmm_activation_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu, at::Tensor & out) { |
10188 | if (false) { |
10189 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10190 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10191 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10192 | auto self_meta = to_meta(self); |
10193 | auto mat1_meta = to_meta(mat1); |
10194 | auto mat2_meta = to_meta(mat2); |
10195 | auto out_meta = to_meta(out); |
10196 | at::AutoDispatchSkipFunctionalize func_guard; |
10197 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10198 | at::_ops::_addmm_activation_out::call(self_meta, mat1_meta, mat2_meta, beta, alpha, use_gelu, out_meta); |
10199 | } |
10200 | |
10201 | at::Tensor self_; |
10202 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10203 | at::functionalization::impl::sync(self); |
10204 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10205 | } else { |
10206 | self_ = self; |
10207 | } |
10208 | |
10209 | at::Tensor mat1_; |
10210 | if (at::functionalization::impl::isFunctionalTensor(mat1)) { |
10211 | at::functionalization::impl::sync(mat1); |
10212 | mat1_ = at::functionalization::impl::from_functional_tensor(mat1); |
10213 | } else { |
10214 | mat1_ = mat1; |
10215 | } |
10216 | |
10217 | at::Tensor mat2_; |
10218 | if (at::functionalization::impl::isFunctionalTensor(mat2)) { |
10219 | at::functionalization::impl::sync(mat2); |
10220 | mat2_ = at::functionalization::impl::from_functional_tensor(mat2); |
10221 | } else { |
10222 | mat2_ = mat2; |
10223 | } |
10224 | |
10225 | at::Tensor out_; |
10226 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
10227 | at::functionalization::impl::sync(out); |
10228 | out_ = at::functionalization::impl::from_functional_tensor(out); |
10229 | } else { |
10230 | out_ = out; |
10231 | } |
10232 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
10233 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mat1) || at::functionalization::impl::isFunctionalTensor(mat2))) { |
10234 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10235 | TORCH_INTERNAL_ASSERT(false, |
10236 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
10237 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
10238 | } else { |
10239 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10240 | at::AutoDispatchSkipFunctionalize guard; |
10241 | at::Tensor tmp_output = at::_ops::_addmm_activation_out::call(self_, mat1_, mat2_, beta, alpha, use_gelu, out_); |
10242 | return out; |
10243 | } |
10244 | } else { |
10245 | at::Tensor tmp_output; |
10246 | { |
10247 | at::AutoDispatchSkipFunctionalize guard; |
10248 | tmp_output = at::_ops::_addmm_activation::call(self_, mat1_, mat2_, beta, alpha, use_gelu); |
10249 | } |
10250 | at::functionalization::impl::replace_(out, tmp_output); |
10251 | at::functionalization::impl::commit_update(out); |
10252 | at::functionalization::impl::sync(out); |
10253 | return out; |
10254 | } |
10255 | } |
10256 | |
10257 | at::Tensor & _to_dense_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, at::Tensor & out) { |
10258 | if (false) { |
10259 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10260 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10261 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10262 | auto self_meta = to_meta(self); |
10263 | auto out_meta = to_meta(out); |
10264 | at::AutoDispatchSkipFunctionalize func_guard; |
10265 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10266 | at::_ops::_to_dense_out::call(self_meta, dtype, out_meta); |
10267 | } |
10268 | |
10269 | at::Tensor self_; |
10270 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10271 | at::functionalization::impl::sync(self); |
10272 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10273 | } else { |
10274 | self_ = self; |
10275 | } |
10276 | |
10277 | at::Tensor out_; |
10278 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
10279 | at::functionalization::impl::sync(out); |
10280 | out_ = at::functionalization::impl::from_functional_tensor(out); |
10281 | } else { |
10282 | out_ = out; |
10283 | } |
10284 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
10285 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
10286 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10287 | TORCH_INTERNAL_ASSERT(false, |
10288 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
10289 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
10290 | } else { |
10291 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10292 | at::AutoDispatchSkipFunctionalize guard; |
10293 | at::Tensor tmp_output = at::_ops::_to_dense_out::call(self_, dtype, out_); |
10294 | return out; |
10295 | } |
10296 | } else { |
10297 | at::Tensor tmp_output; |
10298 | { |
10299 | at::AutoDispatchSkipFunctionalize guard; |
10300 | tmp_output = at::_ops::_to_dense::call(self_, dtype); |
10301 | } |
10302 | at::functionalization::impl::replace_(out, tmp_output); |
10303 | at::functionalization::impl::commit_update(out); |
10304 | at::functionalization::impl::sync(out); |
10305 | return out; |
10306 | } |
10307 | } |
10308 | |
10309 | at::Tensor & _coalesced_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool coalesced, at::Tensor & out) { |
10310 | if (false) { |
10311 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10312 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10313 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10314 | auto self_meta = to_meta(self); |
10315 | auto out_meta = to_meta(out); |
10316 | at::AutoDispatchSkipFunctionalize func_guard; |
10317 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10318 | at::_ops::_coalesced_out::call(self_meta, coalesced, out_meta); |
10319 | } |
10320 | |
10321 | at::Tensor self_; |
10322 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10323 | at::functionalization::impl::sync(self); |
10324 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10325 | } else { |
10326 | self_ = self; |
10327 | } |
10328 | |
10329 | at::Tensor out_; |
10330 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
10331 | at::functionalization::impl::sync(out); |
10332 | out_ = at::functionalization::impl::from_functional_tensor(out); |
10333 | } else { |
10334 | out_ = out; |
10335 | } |
10336 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
10337 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
10338 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10339 | TORCH_INTERNAL_ASSERT(false, |
10340 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
10341 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
10342 | } else { |
10343 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10344 | at::AutoDispatchSkipFunctionalize guard; |
10345 | at::Tensor tmp_output = at::_ops::_coalesced_out::call(self_, coalesced, out_); |
10346 | return out; |
10347 | } |
10348 | } else { |
10349 | at::Tensor tmp_output; |
10350 | { |
10351 | at::AutoDispatchSkipFunctionalize guard; |
10352 | tmp_output = at::_ops::_coalesced::call(self_, coalesced); |
10353 | } |
10354 | at::functionalization::impl::replace_(out, tmp_output); |
10355 | at::functionalization::impl::commit_update(out); |
10356 | at::functionalization::impl::sync(out); |
10357 | return out; |
10358 | } |
10359 | } |
10360 | |
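      | // Here `self` is the only tensor argument, so the "mutating a non-functional |
      | // tensor with a functional tensor" error below is emitted as an unreachable |
      | // `if ((false))` branch. |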
10361 | at::Tensor & _coalesced_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, bool coalesced) { |
10362 | if (true) { |
10363 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10364 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10365 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10366 | auto self_meta = to_meta(self); |
10367 | at::AutoDispatchSkipFunctionalize func_guard; |
10368 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10369 | at::_ops::_coalesced_::call(self_meta, coalesced); |
10370 | } |
10371 | |
10372 | at::Tensor self_; |
10373 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10374 | at::functionalization::impl::sync(self); |
10375 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10376 | } else { |
10377 | self_ = self; |
10378 | } |
10379 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
10380 | if ((false)) { |
10381 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10382 | TORCH_INTERNAL_ASSERT(false, |
10383 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
10384 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
10385 | } else { |
10386 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10387 | at::AutoDispatchSkipFunctionalize guard; |
10388 | at::Tensor tmp_output = at::_ops::_coalesced_::call(self_, coalesced); |
10389 | return self; |
10390 | } |
10391 | } else { |
10392 | at::Tensor tmp_output; |
10393 | { |
10394 | at::AutoDispatchSkipFunctionalize guard; |
10395 | tmp_output = at::_ops::_coalesced::call(self_, coalesced); |
10396 | } |
10397 | at::functionalization::impl::replace_(self, tmp_output); |
10398 | at::functionalization::impl::commit_update(self); |
10399 | at::functionalization::impl::sync(self); |
10400 | return self; |
10401 | } |
10402 | } |
10403 | |
10404 | at::Tensor & to_sparse_csr_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dense_dim, at::Tensor & out) { |
10405 | if (false) { |
10406 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10407 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10408 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10409 | auto self_meta = to_meta(self); |
10410 | auto out_meta = to_meta(out); |
10411 | at::AutoDispatchSkipFunctionalize func_guard; |
10412 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10413 | at::_ops::to_sparse_csr_out::call(self_meta, dense_dim, out_meta); |
10414 | } |
10415 | |
10416 | at::Tensor self_; |
10417 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10418 | at::functionalization::impl::sync(self); |
10419 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10420 | } else { |
10421 | self_ = self; |
10422 | } |
10423 | |
10424 | at::Tensor out_; |
10425 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
10426 | at::functionalization::impl::sync(out); |
10427 | out_ = at::functionalization::impl::from_functional_tensor(out); |
10428 | } else { |
10429 | out_ = out; |
10430 | } |
10431 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
10432 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
10433 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10434 | TORCH_INTERNAL_ASSERT(false, |
10435 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
10436 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
10437 | } else { |
10438 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10439 | at::AutoDispatchSkipFunctionalize guard; |
10440 | at::Tensor tmp_output = at::_ops::to_sparse_csr_out::call(self_, dense_dim, out_); |
10441 | return out; |
10442 | } |
10443 | } else { |
10444 | at::Tensor tmp_output; |
10445 | { |
10446 | at::AutoDispatchSkipFunctionalize guard; |
10447 | tmp_output = at::_ops::to_sparse_csr::call(self_, dense_dim); |
10448 | } |
10449 | at::functionalization::impl::replace_(out, tmp_output); |
10450 | at::functionalization::impl::commit_update(out); |
10451 | at::functionalization::impl::sync(out); |
10452 | return out; |
10453 | } |
10454 | } |
10455 | |
10456 | at::Tensor & to_sparse_csc_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dense_dim, at::Tensor & out) { |
10457 | if (false) { |
10458 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10459 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10460 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10461 | auto self_meta = to_meta(self); |
10462 | auto out_meta = to_meta(out); |
10463 | at::AutoDispatchSkipFunctionalize func_guard; |
10464 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10465 | at::_ops::to_sparse_csc_out::call(self_meta, dense_dim, out_meta); |
10466 | } |
10467 | |
10468 | at::Tensor self_; |
10469 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10470 | at::functionalization::impl::sync(self); |
10471 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10472 | } else { |
10473 | self_ = self; |
10474 | } |
10475 | |
10476 | at::Tensor out_; |
10477 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
10478 | at::functionalization::impl::sync(out); |
10479 | out_ = at::functionalization::impl::from_functional_tensor(out); |
10480 | } else { |
10481 | out_ = out; |
10482 | } |
10483 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
10484 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
10485 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10486 | TORCH_INTERNAL_ASSERT(false, |
10487 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
10488 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
10489 | } else { |
10490 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10491 | at::AutoDispatchSkipFunctionalize guard; |
10492 | at::Tensor tmp_output = at::_ops::to_sparse_csc_out::call(self_, dense_dim, out_); |
10493 | return out; |
10494 | } |
10495 | } else { |
10496 | at::Tensor tmp_output; |
10497 | { |
10498 | at::AutoDispatchSkipFunctionalize guard; |
10499 | tmp_output = at::_ops::to_sparse_csc::call(self_, dense_dim); |
10500 | } |
10501 | at::functionalization::impl::replace_(out, tmp_output); |
10502 | at::functionalization::impl::commit_update(out); |
10503 | at::functionalization::impl::sync(out); |
10504 | return out; |
10505 | } |
10506 | } |
10507 | |
10508 | at::Tensor & to_sparse_bsc_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim, at::Tensor & out) { |
10509 | if (false) { |
10510 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10511 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10512 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10513 | auto self_meta = to_meta(self); |
10514 | auto out_meta = to_meta(out); |
10515 | at::AutoDispatchSkipFunctionalize func_guard; |
10516 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10517 | at::_ops::to_sparse_bsc_out::call(self_meta, blocksize, dense_dim, out_meta); |
10518 | } |
10519 | |
10520 | at::Tensor self_; |
10521 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10522 | at::functionalization::impl::sync(self); |
10523 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10524 | } else { |
10525 | self_ = self; |
10526 | } |
10527 | |
10528 | at::Tensor out_; |
10529 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
10530 | at::functionalization::impl::sync(out); |
10531 | out_ = at::functionalization::impl::from_functional_tensor(out); |
10532 | } else { |
10533 | out_ = out; |
10534 | } |
10535 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
10536 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
10537 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10538 | TORCH_INTERNAL_ASSERT(false, |
10539 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
10540 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
10541 | } else { |
10542 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10543 | at::AutoDispatchSkipFunctionalize guard; |
10544 | at::Tensor tmp_output = at::_ops::to_sparse_bsc_out::call(self_, blocksize, dense_dim, out_); |
10545 | return out; |
10546 | } |
10547 | } else { |
10548 | at::Tensor tmp_output; |
10549 | { |
10550 | at::AutoDispatchSkipFunctionalize guard; |
10551 | tmp_output = at::_ops::to_sparse_bsc::call(self_, blocksize, dense_dim); |
10552 | } |
10553 | at::functionalization::impl::replace_(out, tmp_output); |
10554 | at::functionalization::impl::commit_update(out); |
10555 | at::functionalization::impl::sync(out); |
10556 | return out; |
10557 | } |
10558 | } |
10559 | |
10560 | at::Tensor & quantize_per_tensor_dynamic_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype, bool reduce_range, at::Tensor & out) { |
10561 | if (false) { |
10562 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10563 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10564 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10565 | auto self_meta = to_meta(self); |
10566 | auto out_meta = to_meta(out); |
10567 | at::AutoDispatchSkipFunctionalize func_guard; |
10568 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10569 | at::_ops::quantize_per_tensor_dynamic_out::call(self_meta, dtype, reduce_range, out_meta); |
10570 | } |
10571 | |
10572 | at::Tensor self_; |
10573 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10574 | at::functionalization::impl::sync(self); |
10575 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10576 | } else { |
10577 | self_ = self; |
10578 | } |
10579 | |
10580 | at::Tensor out_; |
10581 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
10582 | at::functionalization::impl::sync(out); |
10583 | out_ = at::functionalization::impl::from_functional_tensor(out); |
10584 | } else { |
10585 | out_ = out; |
10586 | } |
10587 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
10588 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
10589 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10590 | TORCH_INTERNAL_ASSERT(false, |
10591 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
10592 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
10593 | } else { |
10594 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10595 | at::AutoDispatchSkipFunctionalize guard; |
10596 | at::Tensor tmp_output = at::_ops::quantize_per_tensor_dynamic_out::call(self_, dtype, reduce_range, out_); |
10597 | return out; |
10598 | } |
10599 | } else { |
10600 | at::Tensor tmp_output; |
10601 | { |
10602 | at::AutoDispatchSkipFunctionalize guard; |
10603 | tmp_output = at::_ops::quantize_per_tensor_dynamic::call(self_, dtype, reduce_range); |
10604 | } |
10605 | at::functionalization::impl::replace_(out, tmp_output); |
10606 | at::functionalization::impl::commit_update(out); |
10607 | at::functionalization::impl::sync(out); |
10608 | return out; |
10609 | } |
10610 | } |
10611 | |
10612 | at::Tensor & quantize_per_tensor_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype, at::Tensor & out) { |
10613 | if (false) { |
10614 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10615 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10616 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10617 | auto self_meta = to_meta(self); |
10618 | auto out_meta = to_meta(out); |
10619 | at::AutoDispatchSkipFunctionalize func_guard; |
10620 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10621 | at::_ops::quantize_per_tensor_out::call(self_meta, scale, zero_point, dtype, out_meta); |
10622 | } |
10623 | |
10624 | at::Tensor self_; |
10625 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10626 | at::functionalization::impl::sync(self); |
10627 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10628 | } else { |
10629 | self_ = self; |
10630 | } |
10631 | |
10632 | at::Tensor out_; |
10633 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
10634 | at::functionalization::impl::sync(out); |
10635 | out_ = at::functionalization::impl::from_functional_tensor(out); |
10636 | } else { |
10637 | out_ = out; |
10638 | } |
10639 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
10640 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
10641 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10642 | TORCH_INTERNAL_ASSERT(false, |
10643 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
10644 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
10645 | } else { |
10646 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10647 | at::AutoDispatchSkipFunctionalize guard; |
10648 | at::Tensor tmp_output = at::_ops::quantize_per_tensor_out::call(self_, scale, zero_point, dtype, out_); |
10649 | return out; |
10650 | } |
10651 | } else { |
10652 | at::Tensor tmp_output; |
10653 | { |
10654 | at::AutoDispatchSkipFunctionalize guard; |
10655 | tmp_output = at::_ops::quantize_per_tensor::call(self_, scale, zero_point, dtype); |
10656 | } |
10657 | at::functionalization::impl::replace_(out, tmp_output); |
10658 | at::functionalization::impl::commit_update(out); |
10659 | at::functionalization::impl::sync(out); |
10660 | return out; |
10661 | } |
10662 | } |
10663 | |
10664 | at::Tensor & quantize_per_tensor_out_tensor_qparams_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype, at::Tensor & out) { |
10665 | if (false) { |
10666 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10667 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10668 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10669 | auto self_meta = to_meta(self); |
10670 | auto scale_meta = to_meta(scale); |
10671 | auto zero_point_meta = to_meta(zero_point); |
10672 | auto out_meta = to_meta(out); |
10673 | at::AutoDispatchSkipFunctionalize func_guard; |
10674 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10675 | at::_ops::quantize_per_tensor_tensor_qparams_out::call(self_meta, scale_meta, zero_point_meta, dtype, out_meta); |
10676 | } |
10677 | |
10678 | at::Tensor self_; |
10679 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10680 | at::functionalization::impl::sync(self); |
10681 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10682 | } else { |
10683 | self_ = self; |
10684 | } |
10685 | |
10686 | at::Tensor scale_; |
10687 | if (at::functionalization::impl::isFunctionalTensor(scale)) { |
10688 | at::functionalization::impl::sync(scale); |
10689 | scale_ = at::functionalization::impl::from_functional_tensor(scale); |
10690 | } else { |
10691 | scale_ = scale; |
10692 | } |
10693 | |
10694 | at::Tensor zero_point_; |
10695 | if (at::functionalization::impl::isFunctionalTensor(zero_point)) { |
10696 | at::functionalization::impl::sync(zero_point); |
10697 | zero_point_ = at::functionalization::impl::from_functional_tensor(zero_point); |
10698 | } else { |
10699 | zero_point_ = zero_point; |
10700 | } |
10701 | |
10702 | at::Tensor out_; |
10703 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
10704 | at::functionalization::impl::sync(out); |
10705 | out_ = at::functionalization::impl::from_functional_tensor(out); |
10706 | } else { |
10707 | out_ = out; |
10708 | } |
10709 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
10710 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(scale) || at::functionalization::impl::isFunctionalTensor(zero_point))) { |
10711 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10712 | TORCH_INTERNAL_ASSERT(false, |
10713 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
10714 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
10715 | } else { |
10716 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10717 | at::AutoDispatchSkipFunctionalize guard; |
10718 | at::Tensor tmp_output = at::_ops::quantize_per_tensor_tensor_qparams_out::call(self_, scale_, zero_point_, dtype, out_); |
10719 | return out; |
10720 | } |
10721 | } else { |
10722 | at::Tensor tmp_output; |
10723 | { |
10724 | at::AutoDispatchSkipFunctionalize guard; |
10725 | tmp_output = at::_ops::quantize_per_tensor_tensor_qparams::call(self_, scale_, zero_point_, dtype); |
10726 | } |
10727 | at::functionalization::impl::replace_(out, tmp_output); |
10728 | at::functionalization::impl::commit_update(out); |
10729 | at::functionalization::impl::sync(out); |
10730 | return out; |
10731 | } |
10732 | } |
10733 | |
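      | // TensorList variant: returns void rather than a Tensor&. List arguments are |
      | // unwrapped element-wise (materialized with .vec() when already non-functional), |
      | // and the functional result is committed back into every tensor of `out`. |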
10734 | void quantize_per_tensor_out_tensors_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype, at::TensorList out) { |
10735 | if (false) { |
10736 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10737 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10738 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10739 | auto tensors_meta = to_meta(tensors); |
10740 | auto scales_meta = to_meta(scales); |
10741 | auto zero_points_meta = to_meta(zero_points); |
10742 | auto out_meta = to_meta(out); |
10743 | at::AutoDispatchSkipFunctionalize func_guard; |
10744 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10745 | at::_ops::quantize_per_tensor_tensors_out::call(tensors_meta, scales_meta, zero_points_meta, dtype, out_meta); |
10746 | } |
10747 | |
10748 | ::std::vector<at::Tensor> tensors_; |
10749 | if (at::functionalization::impl::isFunctionalTensor(tensors)) { |
10750 | at::functionalization::impl::sync(tensors); |
10751 | tensors_ = at::functionalization::impl::from_functional_tensor(tensors); |
10752 | } else { |
10753 | tensors_ = tensors.vec(); |
10754 | } |
10755 | |
10756 | at::Tensor scales_; |
10757 | if (at::functionalization::impl::isFunctionalTensor(scales)) { |
10758 | at::functionalization::impl::sync(scales); |
10759 | scales_ = at::functionalization::impl::from_functional_tensor(scales); |
10760 | } else { |
10761 | scales_ = scales; |
10762 | } |
10763 | |
10764 | at::Tensor zero_points_; |
10765 | if (at::functionalization::impl::isFunctionalTensor(zero_points)) { |
10766 | at::functionalization::impl::sync(zero_points); |
10767 | zero_points_ = at::functionalization::impl::from_functional_tensor(zero_points); |
10768 | } else { |
10769 | zero_points_ = zero_points; |
10770 | } |
10771 | |
10772 | ::std::vector<at::Tensor> out_; |
10773 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
10774 | at::functionalization::impl::sync(out); |
10775 | out_ = at::functionalization::impl::from_functional_tensor(out); |
10776 | } else { |
10777 | out_ = out.vec(); |
10778 | } |
10779 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
10780 | if ((false || at::functionalization::impl::isFunctionalTensor(tensors) || at::functionalization::impl::isFunctionalTensor(scales) || at::functionalization::impl::isFunctionalTensor(zero_points))) { |
10781 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10782 | TORCH_INTERNAL_ASSERT(false, |
10783 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
10784 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
10785 | } else { |
10786 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10787 | at::AutoDispatchSkipFunctionalize guard; |
10788 | at::_ops::quantize_per_tensor_tensors_out::call(tensors_, scales_, zero_points_, dtype, out_); |
10789 | return; |
10790 | } |
10791 | } else { |
10792 | ::std::vector<at::Tensor> tmp_output; |
10793 | { |
10794 | at::AutoDispatchSkipFunctionalize guard; |
10795 | tmp_output = at::_ops::quantize_per_tensor_tensors::call(tensors_, scales_, zero_points_, dtype); |
10796 | } |
10797 | at::functionalization::impl::replace_(out, tmp_output); |
10798 | at::functionalization::impl::commit_update(out); |
10799 | at::functionalization::impl::sync(out); |
10800 | |
10801 | } |
10802 | } |
10803 | |
10804 | at::Tensor & quantize_per_channel_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype, at::Tensor & out) { |
10805 | if (false) { |
10806 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10807 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10808 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10809 | auto self_meta = to_meta(self); |
10810 | auto scales_meta = to_meta(scales); |
10811 | auto zero_points_meta = to_meta(zero_points); |
10812 | auto out_meta = to_meta(out); |
10813 | at::AutoDispatchSkipFunctionalize func_guard; |
10814 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10815 | at::_ops::quantize_per_channel_out::call(self_meta, scales_meta, zero_points_meta, axis, dtype, out_meta); |
10816 | } |
10817 | |
10818 | at::Tensor self_; |
10819 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10820 | at::functionalization::impl::sync(self); |
10821 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10822 | } else { |
10823 | self_ = self; |
10824 | } |
10825 | |
10826 | at::Tensor scales_; |
10827 | if (at::functionalization::impl::isFunctionalTensor(scales)) { |
10828 | at::functionalization::impl::sync(scales); |
10829 | scales_ = at::functionalization::impl::from_functional_tensor(scales); |
10830 | } else { |
10831 | scales_ = scales; |
10832 | } |
10833 | |
10834 | at::Tensor zero_points_; |
10835 | if (at::functionalization::impl::isFunctionalTensor(zero_points)) { |
10836 | at::functionalization::impl::sync(zero_points); |
10837 | zero_points_ = at::functionalization::impl::from_functional_tensor(zero_points); |
10838 | } else { |
10839 | zero_points_ = zero_points; |
10840 | } |
10841 | |
10842 | at::Tensor out_; |
10843 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
10844 | at::functionalization::impl::sync(out); |
10845 | out_ = at::functionalization::impl::from_functional_tensor(out); |
10846 | } else { |
10847 | out_ = out; |
10848 | } |
10849 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
10850 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(scales) || at::functionalization::impl::isFunctionalTensor(zero_points))) { |
10851 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10852 | TORCH_INTERNAL_ASSERT(false, |
10853 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
10854 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
10855 | } else { |
10856 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10857 | at::AutoDispatchSkipFunctionalize guard; |
10858 | at::Tensor tmp_output = at::_ops::quantize_per_channel_out::call(self_, scales_, zero_points_, axis, dtype, out_); |
10859 | return out; |
10860 | } |
10861 | } else { |
10862 | at::Tensor tmp_output; |
10863 | { |
10864 | at::AutoDispatchSkipFunctionalize guard; |
10865 | tmp_output = at::_ops::quantize_per_channel::call(self_, scales_, zero_points_, axis, dtype); |
10866 | } |
10867 | at::functionalization::impl::replace_(out, tmp_output); |
10868 | at::functionalization::impl::commit_update(out); |
10869 | at::functionalization::impl::sync(out); |
10870 | return out; |
10871 | } |
10872 | } |
10873 | |
10874 | at::Tensor & _make_per_channel_quantized_tensor_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, at::Tensor & out) { |
10875 | if (false) { |
10876 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10877 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10878 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10879 | auto self_meta = to_meta(self); |
10880 | auto scale_meta = to_meta(scale); |
10881 | auto zero_point_meta = to_meta(zero_point); |
10882 | auto out_meta = to_meta(out); |
10883 | at::AutoDispatchSkipFunctionalize func_guard; |
10884 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10885 | at::_ops::_make_per_channel_quantized_tensor_out::call(self_meta, scale_meta, zero_point_meta, axis, out_meta); |
10886 | } |
10887 | |
10888 | at::Tensor self_; |
10889 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10890 | at::functionalization::impl::sync(self); |
10891 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10892 | } else { |
10893 | self_ = self; |
10894 | } |
10895 | |
10896 | at::Tensor scale_; |
10897 | if (at::functionalization::impl::isFunctionalTensor(scale)) { |
10898 | at::functionalization::impl::sync(scale); |
10899 | scale_ = at::functionalization::impl::from_functional_tensor(scale); |
10900 | } else { |
10901 | scale_ = scale; |
10902 | } |
10903 | |
10904 | at::Tensor zero_point_; |
10905 | if (at::functionalization::impl::isFunctionalTensor(zero_point)) { |
10906 | at::functionalization::impl::sync(zero_point); |
10907 | zero_point_ = at::functionalization::impl::from_functional_tensor(zero_point); |
10908 | } else { |
10909 | zero_point_ = zero_point; |
10910 | } |
10911 | |
10912 | at::Tensor out_; |
10913 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
10914 | at::functionalization::impl::sync(out); |
10915 | out_ = at::functionalization::impl::from_functional_tensor(out); |
10916 | } else { |
10917 | out_ = out; |
10918 | } |
10919 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
10920 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(scale) || at::functionalization::impl::isFunctionalTensor(zero_point))) { |
10921 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10922 | TORCH_INTERNAL_ASSERT(false, |
10923 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
10924 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
10925 | } else { |
10926 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10927 | at::AutoDispatchSkipFunctionalize guard; |
10928 | at::Tensor tmp_output = at::_ops::_make_per_channel_quantized_tensor_out::call(self_, scale_, zero_point_, axis, out_); |
10929 | return out; |
10930 | } |
10931 | } else { |
10932 | at::Tensor tmp_output; |
10933 | { |
10934 | at::AutoDispatchSkipFunctionalize guard; |
10935 | tmp_output = at::_ops::_make_per_channel_quantized_tensor::call(self_, scale_, zero_point_, axis); |
10936 | } |
10937 | at::functionalization::impl::replace_(out, tmp_output); |
10938 | at::functionalization::impl::commit_update(out); |
10939 | at::functionalization::impl::sync(out); |
10940 | return out; |
10941 | } |
10942 | } |
10943 | |
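      | // Multi-output variant: the functional op returns a (Tensor, Tensor) pair; each |
      | // element is committed back into its corresponding out argument and the kernel |
      | // returns references to the original outs. |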
10944 | ::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_tensor_affine_cachemask_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) { |
10945 | if (false) { |
10946 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10947 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10948 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10949 | auto self_meta = to_meta(self); |
10950 | auto out0_meta = to_meta(out0); |
10951 | auto out1_meta = to_meta(out1); |
10952 | at::AutoDispatchSkipFunctionalize func_guard; |
10953 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10954 | at::_ops::fake_quantize_per_tensor_affine_cachemask_out::call(self_meta, scale, zero_point, quant_min, quant_max, out0_meta, out1_meta); |
10955 | } |
10956 | |
10957 | at::Tensor self_; |
10958 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10959 | at::functionalization::impl::sync(self); |
10960 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10961 | } else { |
10962 | self_ = self; |
10963 | } |
10964 | |
10965 | at::Tensor out0_; |
10966 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
10967 | at::functionalization::impl::sync(out0); |
10968 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
10969 | } else { |
10970 | out0_ = out0; |
10971 | } |
10972 | |
10973 | at::Tensor out1_; |
10974 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
10975 | at::functionalization::impl::sync(out1); |
10976 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
10977 | } else { |
10978 | out1_ = out1; |
10979 | } |
10980 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) { |
10981 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
10982 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10983 | TORCH_INTERNAL_ASSERT(false, |
10984 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
10985 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
10986 | } else { |
10987 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10988 | at::AutoDispatchSkipFunctionalize guard; |
10989 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::fake_quantize_per_tensor_affine_cachemask_out::call(self_, scale, zero_point, quant_min, quant_max, out0_, out1_); |
10990 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
10991 | } |
10992 | } else { |
10993 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
10994 | { |
10995 | at::AutoDispatchSkipFunctionalize guard; |
10996 | tmp_output = at::_ops::fake_quantize_per_tensor_affine_cachemask::call(self_, scale, zero_point, quant_min, quant_max); |
10997 | } |
10998 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
10999 | at::functionalization::impl::commit_update(out0); |
11000 | at::functionalization::impl::sync(out0); |
11001 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
11002 | at::functionalization::impl::commit_update(out1); |
11003 | at::functionalization::impl::sync(out1); |
11004 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
11005 | } |
11006 | } |
11007 | |
11008 | ::std::tuple<at::Tensor &,at::Tensor &> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) { |
11009 | if (false) { |
11010 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11011 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
11012 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
11013 | auto self_meta = to_meta(self); |
11014 | auto scale_meta = to_meta(scale); |
11015 | auto zero_point_meta = to_meta(zero_point); |
11016 | auto fake_quant_enabled_meta = to_meta(fake_quant_enabled); |
11017 | auto out0_meta = to_meta(out0); |
11018 | auto out1_meta = to_meta(out1); |
11019 | at::AutoDispatchSkipFunctionalize func_guard; |
11020 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11021 | at::_ops::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out::call(self_meta, scale_meta, zero_point_meta, fake_quant_enabled_meta, quant_min, quant_max, out0_meta, out1_meta); |
11022 | } |
11023 | |
11024 | at::Tensor self_; |
11025 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11026 | at::functionalization::impl::sync(self); |
11027 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11028 | } else { |
11029 | self_ = self; |
11030 | } |
11031 | |
11032 | at::Tensor scale_; |
11033 | if (at::functionalization::impl::isFunctionalTensor(scale)) { |
11034 | at::functionalization::impl::sync(scale); |
11035 | scale_ = at::functionalization::impl::from_functional_tensor(scale); |
11036 | } else { |
11037 | scale_ = scale; |
11038 | } |
11039 | |
11040 | at::Tensor zero_point_; |
11041 | if (at::functionalization::impl::isFunctionalTensor(zero_point)) { |
11042 | at::functionalization::impl::sync(zero_point); |
11043 | zero_point_ = at::functionalization::impl::from_functional_tensor(zero_point); |
11044 | } else { |
11045 | zero_point_ = zero_point; |
11046 | } |
11047 | |
11048 | at::Tensor fake_quant_enabled_; |
11049 | if (at::functionalization::impl::isFunctionalTensor(fake_quant_enabled)) { |
11050 | at::functionalization::impl::sync(fake_quant_enabled); |
11051 | fake_quant_enabled_ = at::functionalization::impl::from_functional_tensor(fake_quant_enabled); |
11052 | } else { |
11053 | fake_quant_enabled_ = fake_quant_enabled; |
11054 | } |
11055 | |
11056 | at::Tensor out0_; |
11057 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
11058 | at::functionalization::impl::sync(out0); |
11059 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
11060 | } else { |
11061 | out0_ = out0; |
11062 | } |
11063 | |
11064 | at::Tensor out1_; |
11065 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
11066 | at::functionalization::impl::sync(out1); |
11067 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
11068 | } else { |
11069 | out1_ = out1; |
11070 | } |
11071 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) { |
11072 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(scale) || at::functionalization::impl::isFunctionalTensor(zero_point) || at::functionalization::impl::isFunctionalTensor(fake_quant_enabled))) { |
11073 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
11074 | TORCH_INTERNAL_ASSERT(false, |
11075 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
11076 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
11077 | } else { |
11078 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11079 | at::AutoDispatchSkipFunctionalize guard; |
11080 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out::call(self_, scale_, zero_point_, fake_quant_enabled_, quant_min, quant_max, out0_, out1_); |
11081 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
11082 | } |
11083 | } else { |
11084 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
11085 | { |
11086 | at::AutoDispatchSkipFunctionalize guard; |
11087 | tmp_output = at::_ops::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams::call(self_, scale_, zero_point_, fake_quant_enabled_, quant_min, quant_max); |
11088 | } |
11089 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
11090 | at::functionalization::impl::commit_update(out0); |
11091 | at::functionalization::impl::sync(out0); |
11092 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
11093 | at::functionalization::impl::commit_update(out1); |
11094 | at::functionalization::impl::sync(out1); |
11095 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
11096 | } |
11097 | } |
11098 | |
11099 | at::Tensor & _fake_quantize_learnable_per_tensor_affine_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor, at::Tensor & out) { |
11100 | if (false) { |
11101 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11102 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
11103 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
11104 | auto self_meta = to_meta(self); |
11105 | auto scale_meta = to_meta(scale); |
11106 | auto zero_point_meta = to_meta(zero_point); |
11107 | auto out_meta = to_meta(out); |
11108 | at::AutoDispatchSkipFunctionalize func_guard; |
11109 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11110 | at::_ops::_fake_quantize_learnable_per_tensor_affine_out::call(self_meta, scale_meta, zero_point_meta, quant_min, quant_max, grad_factor, out_meta); |
11111 | } |
11112 | |
11113 | at::Tensor self_; |
11114 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11115 | at::functionalization::impl::sync(self); |
11116 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11117 | } else { |
11118 | self_ = self; |
11119 | } |
11120 | |
11121 | at::Tensor scale_; |
11122 | if (at::functionalization::impl::isFunctionalTensor(scale)) { |
11123 | at::functionalization::impl::sync(scale); |
11124 | scale_ = at::functionalization::impl::from_functional_tensor(scale); |
11125 | } else { |
11126 | scale_ = scale; |
11127 | } |
11128 | |
11129 | at::Tensor zero_point_; |
11130 | if (at::functionalization::impl::isFunctionalTensor(zero_point)) { |
11131 | at::functionalization::impl::sync(zero_point); |
11132 | zero_point_ = at::functionalization::impl::from_functional_tensor(zero_point); |
11133 | } else { |
11134 | zero_point_ = zero_point; |
11135 | } |
11136 | |
11137 | at::Tensor out_; |
11138 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
11139 | at::functionalization::impl::sync(out); |
11140 | out_ = at::functionalization::impl::from_functional_tensor(out); |
11141 | } else { |
11142 | out_ = out; |
11143 | } |
11144 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
11145 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(scale) || at::functionalization::impl::isFunctionalTensor(zero_point))) { |
11146 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
11147 | TORCH_INTERNAL_ASSERT(false, |
11148 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
11149 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
11150 | } else { |
11151 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11152 | at::AutoDispatchSkipFunctionalize guard; |
11153 | at::Tensor tmp_output = at::_ops::_fake_quantize_learnable_per_tensor_affine_out::call(self_, scale_, zero_point_, quant_min, quant_max, grad_factor, out_); |
          return out;
11155 | } |
11156 | } else { |
11157 | at::Tensor tmp_output; |
11158 | { |
11159 | at::AutoDispatchSkipFunctionalize guard; |
11160 | tmp_output = at::_ops::_fake_quantize_learnable_per_tensor_affine::call(self_, scale_, zero_point_, quant_min, quant_max, grad_factor); |
11161 | } |
11162 | at::functionalization::impl::replace_(out, tmp_output); |
11163 | at::functionalization::impl::commit_update(out); |
11164 | at::functionalization::impl::sync(out); |
11165 | return out; |
11166 | } |
11167 | } |
11168 | |
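    // Multi-output variant: each output tensor is unwrapped and committed back
    // independently, and the kernel returns a tuple of the original output
    // references rather than a single Tensor&.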
11169 | ::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_channel_affine_cachemask_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) { |
11170 | if (false) { |
11171 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11172 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11174 | auto self_meta = to_meta(self); |
11175 | auto scale_meta = to_meta(scale); |
11176 | auto zero_point_meta = to_meta(zero_point); |
11177 | auto out0_meta = to_meta(out0); |
11178 | auto out1_meta = to_meta(out1); |
11179 | at::AutoDispatchSkipFunctionalize func_guard; |
11180 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11181 | at::_ops::fake_quantize_per_channel_affine_cachemask_out::call(self_meta, scale_meta, zero_point_meta, axis, quant_min, quant_max, out0_meta, out1_meta); |
11182 | } |
11183 | |
11184 | at::Tensor self_; |
11185 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11186 | at::functionalization::impl::sync(self); |
11187 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11188 | } else { |
11189 | self_ = self; |
11190 | } |
11191 | |
11192 | at::Tensor scale_; |
11193 | if (at::functionalization::impl::isFunctionalTensor(scale)) { |
11194 | at::functionalization::impl::sync(scale); |
11195 | scale_ = at::functionalization::impl::from_functional_tensor(scale); |
11196 | } else { |
11197 | scale_ = scale; |
11198 | } |
11199 | |
11200 | at::Tensor zero_point_; |
11201 | if (at::functionalization::impl::isFunctionalTensor(zero_point)) { |
11202 | at::functionalization::impl::sync(zero_point); |
11203 | zero_point_ = at::functionalization::impl::from_functional_tensor(zero_point); |
11204 | } else { |
11205 | zero_point_ = zero_point; |
11206 | } |
11207 | |
11208 | at::Tensor out0_; |
11209 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
11210 | at::functionalization::impl::sync(out0); |
11211 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
11212 | } else { |
11213 | out0_ = out0; |
11214 | } |
11215 | |
11216 | at::Tensor out1_; |
11217 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
11218 | at::functionalization::impl::sync(out1); |
11219 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
11220 | } else { |
11221 | out1_ = out1; |
11222 | } |
11223 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) { |
11224 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(scale) || at::functionalization::impl::isFunctionalTensor(zero_point))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
11229 | } else { |
11230 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11231 | at::AutoDispatchSkipFunctionalize guard; |
11232 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::fake_quantize_per_channel_affine_cachemask_out::call(self_, scale_, zero_point_, axis, quant_min, quant_max, out0_, out1_); |
          return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
11234 | } |
11235 | } else { |
11236 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
11237 | { |
11238 | at::AutoDispatchSkipFunctionalize guard; |
11239 | tmp_output = at::_ops::fake_quantize_per_channel_affine_cachemask::call(self_, scale_, zero_point_, axis, quant_min, quant_max); |
11240 | } |
11241 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
11242 | at::functionalization::impl::commit_update(out0); |
11243 | at::functionalization::impl::sync(out0); |
11244 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
11245 | at::functionalization::impl::commit_update(out1); |
11246 | at::functionalization::impl::sync(out1); |
11247 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
11248 | } |
11249 | } |
11250 | |
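    // This kernel also handles c10::optional<Tensor> and TensorList arguments:
    // optionals are unwrapped in place, and TensorLists are materialized into
    // ::std::vector<at::Tensor> (falling back to .vec() when no unwrapping is
    // needed). The out= schema returns void, so the functional results are only
    // committed into out0/out1/out2 and nothing is returned.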
11251 | void lstm_mps_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_y, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::TensorList out1, at::TensorList out2) { |
11252 | if (false) { |
11253 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11254 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11256 | auto grad_y_meta = to_meta(grad_y); |
11257 | auto grad_hy_meta = to_meta(grad_hy); |
11258 | auto grad_cy_meta = to_meta(grad_cy); |
11259 | auto z_state_meta = to_meta(z_state); |
11260 | auto cell_state_fwd_meta = to_meta(cell_state_fwd); |
11261 | auto input_meta = to_meta(input); |
11262 | auto hx_meta = to_meta(hx); |
11263 | auto params_meta = to_meta(params); |
11264 | auto out0_meta = to_meta(out0); |
11265 | auto out1_meta = to_meta(out1); |
11266 | auto out2_meta = to_meta(out2); |
11267 | at::AutoDispatchSkipFunctionalize func_guard; |
11268 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11269 | at::_ops::lstm_mps_backward_out::call(grad_y_meta, grad_hy_meta, grad_cy_meta, z_state_meta, cell_state_fwd_meta, input_meta, hx_meta, params_meta, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0_meta, out1_meta, out2_meta); |
11270 | } |
11271 | |
11272 | at::Tensor grad_y_; |
11273 | if (at::functionalization::impl::isFunctionalTensor(grad_y)) { |
11274 | at::functionalization::impl::sync(grad_y); |
11275 | grad_y_ = at::functionalization::impl::from_functional_tensor(grad_y); |
11276 | } else { |
11277 | grad_y_ = grad_y; |
11278 | } |
11279 | |
11280 | c10::optional<at::Tensor> grad_hy_; |
11281 | if (at::functionalization::impl::isFunctionalTensor(grad_hy)) { |
11282 | at::functionalization::impl::sync(grad_hy); |
11283 | grad_hy_ = at::functionalization::impl::from_functional_tensor(grad_hy); |
11284 | } else { |
11285 | grad_hy_ = grad_hy; |
11286 | } |
11287 | |
11288 | c10::optional<at::Tensor> grad_cy_; |
11289 | if (at::functionalization::impl::isFunctionalTensor(grad_cy)) { |
11290 | at::functionalization::impl::sync(grad_cy); |
11291 | grad_cy_ = at::functionalization::impl::from_functional_tensor(grad_cy); |
11292 | } else { |
11293 | grad_cy_ = grad_cy; |
11294 | } |
11295 | |
11296 | at::Tensor z_state_; |
11297 | if (at::functionalization::impl::isFunctionalTensor(z_state)) { |
11298 | at::functionalization::impl::sync(z_state); |
11299 | z_state_ = at::functionalization::impl::from_functional_tensor(z_state); |
11300 | } else { |
11301 | z_state_ = z_state; |
11302 | } |
11303 | |
11304 | at::Tensor cell_state_fwd_; |
11305 | if (at::functionalization::impl::isFunctionalTensor(cell_state_fwd)) { |
11306 | at::functionalization::impl::sync(cell_state_fwd); |
11307 | cell_state_fwd_ = at::functionalization::impl::from_functional_tensor(cell_state_fwd); |
11308 | } else { |
11309 | cell_state_fwd_ = cell_state_fwd; |
11310 | } |
11311 | |
11312 | at::Tensor input_; |
11313 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
11314 | at::functionalization::impl::sync(input); |
11315 | input_ = at::functionalization::impl::from_functional_tensor(input); |
11316 | } else { |
11317 | input_ = input; |
11318 | } |
11319 | |
11320 | ::std::vector<at::Tensor> hx_; |
11321 | if (at::functionalization::impl::isFunctionalTensor(hx)) { |
11322 | at::functionalization::impl::sync(hx); |
11323 | hx_ = at::functionalization::impl::from_functional_tensor(hx); |
11324 | } else { |
11325 | hx_ = hx.vec(); |
11326 | } |
11327 | |
11328 | ::std::vector<at::Tensor> params_; |
11329 | if (at::functionalization::impl::isFunctionalTensor(params)) { |
11330 | at::functionalization::impl::sync(params); |
11331 | params_ = at::functionalization::impl::from_functional_tensor(params); |
11332 | } else { |
11333 | params_ = params.vec(); |
11334 | } |
11335 | |
11336 | at::Tensor out0_; |
11337 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
11338 | at::functionalization::impl::sync(out0); |
11339 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
11340 | } else { |
11341 | out0_ = out0; |
11342 | } |
11343 | |
11344 | ::std::vector<at::Tensor> out1_; |
11345 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
11346 | at::functionalization::impl::sync(out1); |
11347 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
11348 | } else { |
11349 | out1_ = out1.vec(); |
11350 | } |
11351 | |
11352 | ::std::vector<at::Tensor> out2_; |
11353 | if (at::functionalization::impl::isFunctionalTensor(out2)) { |
11354 | at::functionalization::impl::sync(out2); |
11355 | out2_ = at::functionalization::impl::from_functional_tensor(out2); |
11356 | } else { |
11357 | out2_ = out2.vec(); |
11358 | } |
11359 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) { |
11360 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_y) || at::functionalization::impl::isFunctionalTensor(grad_hy) || at::functionalization::impl::isFunctionalTensor(grad_cy) || at::functionalization::impl::isFunctionalTensor(z_state) || at::functionalization::impl::isFunctionalTensor(cell_state_fwd) || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(hx) || at::functionalization::impl::isFunctionalTensor(params))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
11365 | } else { |
11366 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11367 | at::AutoDispatchSkipFunctionalize guard; |
11368 | at::_ops::lstm_mps_backward_out::call(grad_y_, grad_hy_, grad_cy_, z_state_, cell_state_fwd_, input_, hx_, params_, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0_, out1_, out2_); |
11369 | ; |
11370 | } |
11371 | } else { |
11372 | ::std::tuple<at::Tensor,::std::vector<at::Tensor>,::std::vector<at::Tensor>> tmp_output; |
11373 | { |
11374 | at::AutoDispatchSkipFunctionalize guard; |
11375 | tmp_output = at::_ops::lstm_mps_backward::call(grad_y_, grad_hy_, grad_cy_, z_state_, cell_state_fwd_, input_, hx_, params_, has_biases, num_layers, dropout, train, bidirectional, batch_first); |
11376 | } |
11377 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
11378 | at::functionalization::impl::commit_update(out0); |
11379 | at::functionalization::impl::sync(out0); |
11380 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
11381 | at::functionalization::impl::commit_update(out1); |
11382 | at::functionalization::impl::sync(out1); |
11383 | at::functionalization::impl::replace_(out2, std::get<2>(tmp_output)); |
11384 | at::functionalization::impl::commit_update(out2); |
11385 | at::functionalization::impl::sync(out2); |
11386 | |
11387 | } |
11388 | } |
11389 | |
11390 | at::Tensor & lift_fresh_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
11391 | if (false) { |
11392 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11393 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11395 | auto self_meta = to_meta(self); |
11396 | auto out_meta = to_meta(out); |
11397 | at::AutoDispatchSkipFunctionalize func_guard; |
11398 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11399 | at::_ops::lift_fresh_copy_out::call(self_meta, out_meta); |
11400 | } |
11401 | |
11402 | at::Tensor self_; |
11403 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11404 | at::functionalization::impl::sync(self); |
11405 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11406 | } else { |
11407 | self_ = self; |
11408 | } |
11409 | |
11410 | at::Tensor out_; |
11411 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
11412 | at::functionalization::impl::sync(out); |
11413 | out_ = at::functionalization::impl::from_functional_tensor(out); |
11414 | } else { |
11415 | out_ = out; |
11416 | } |
11417 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
11418 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
11423 | } else { |
11424 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11425 | at::AutoDispatchSkipFunctionalize guard; |
11426 | at::Tensor tmp_output = at::_ops::lift_fresh_copy_out::call(self_, out_); |
          return out;
11428 | } |
11429 | } else { |
11430 | at::Tensor tmp_output; |
11431 | { |
11432 | at::AutoDispatchSkipFunctionalize guard; |
11433 | tmp_output = at::_ops::lift_fresh_copy::call(self_); |
11434 | } |
11435 | at::functionalization::impl::replace_(out, tmp_output); |
11436 | at::functionalization::impl::commit_update(out); |
11437 | at::functionalization::impl::sync(out); |
11438 | return out; |
11439 | } |
11440 | } |
11441 | |
11442 | at::Tensor & _masked_softmax_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim, at::Tensor & out) { |
11443 | if (false) { |
11444 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11445 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11447 | auto grad_output_meta = to_meta(grad_output); |
11448 | auto output_meta = to_meta(output); |
11449 | auto mask_meta = to_meta(mask); |
11450 | auto out_meta = to_meta(out); |
11451 | at::AutoDispatchSkipFunctionalize func_guard; |
11452 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11453 | at::_ops::_masked_softmax_backward_out::call(grad_output_meta, output_meta, mask_meta, dim, out_meta); |
11454 | } |
11455 | |
11456 | at::Tensor grad_output_; |
11457 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
11458 | at::functionalization::impl::sync(grad_output); |
11459 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
11460 | } else { |
11461 | grad_output_ = grad_output; |
11462 | } |
11463 | |
11464 | at::Tensor output_; |
11465 | if (at::functionalization::impl::isFunctionalTensor(output)) { |
11466 | at::functionalization::impl::sync(output); |
11467 | output_ = at::functionalization::impl::from_functional_tensor(output); |
11468 | } else { |
11469 | output_ = output; |
11470 | } |
11471 | |
11472 | at::Tensor mask_; |
11473 | if (at::functionalization::impl::isFunctionalTensor(mask)) { |
11474 | at::functionalization::impl::sync(mask); |
11475 | mask_ = at::functionalization::impl::from_functional_tensor(mask); |
11476 | } else { |
11477 | mask_ = mask; |
11478 | } |
11479 | |
11480 | at::Tensor out_; |
11481 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
11482 | at::functionalization::impl::sync(out); |
11483 | out_ = at::functionalization::impl::from_functional_tensor(out); |
11484 | } else { |
11485 | out_ = out; |
11486 | } |
11487 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
11488 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(output) || at::functionalization::impl::isFunctionalTensor(mask))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
11493 | } else { |
11494 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11495 | at::AutoDispatchSkipFunctionalize guard; |
11496 | at::Tensor tmp_output = at::_ops::_masked_softmax_backward_out::call(grad_output_, output_, mask_, dim, out_); |
          return out;
11498 | } |
11499 | } else { |
11500 | at::Tensor tmp_output; |
11501 | { |
11502 | at::AutoDispatchSkipFunctionalize guard; |
11503 | tmp_output = at::_ops::_masked_softmax_backward::call(grad_output_, output_, mask_, dim); |
11504 | } |
11505 | at::functionalization::impl::replace_(out, tmp_output); |
11506 | at::functionalization::impl::commit_update(out); |
11507 | at::functionalization::impl::sync(out); |
11508 | return out; |
11509 | } |
11510 | } |
11511 | |
11512 | at::Tensor & put_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate, at::Tensor & out) { |
11513 | if (false) { |
11514 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11515 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11517 | auto self_meta = to_meta(self); |
11518 | auto index_meta = to_meta(index); |
11519 | auto source_meta = to_meta(source); |
11520 | auto out_meta = to_meta(out); |
11521 | at::AutoDispatchSkipFunctionalize func_guard; |
11522 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11523 | at::_ops::put_out::call(self_meta, index_meta, source_meta, accumulate, out_meta); |
11524 | } |
11525 | |
11526 | at::Tensor self_; |
11527 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11528 | at::functionalization::impl::sync(self); |
11529 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11530 | } else { |
11531 | self_ = self; |
11532 | } |
11533 | |
11534 | at::Tensor index_; |
11535 | if (at::functionalization::impl::isFunctionalTensor(index)) { |
11536 | at::functionalization::impl::sync(index); |
11537 | index_ = at::functionalization::impl::from_functional_tensor(index); |
11538 | } else { |
11539 | index_ = index; |
11540 | } |
11541 | |
11542 | at::Tensor source_; |
11543 | if (at::functionalization::impl::isFunctionalTensor(source)) { |
11544 | at::functionalization::impl::sync(source); |
11545 | source_ = at::functionalization::impl::from_functional_tensor(source); |
11546 | } else { |
11547 | source_ = source; |
11548 | } |
11549 | |
11550 | at::Tensor out_; |
11551 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
11552 | at::functionalization::impl::sync(out); |
11553 | out_ = at::functionalization::impl::from_functional_tensor(out); |
11554 | } else { |
11555 | out_ = out; |
11556 | } |
11557 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
11558 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(source))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
11563 | } else { |
11564 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11565 | at::AutoDispatchSkipFunctionalize guard; |
11566 | at::Tensor tmp_output = at::_ops::put_out::call(self_, index_, source_, accumulate, out_); |
          return out;
11568 | } |
11569 | } else { |
11570 | at::Tensor tmp_output; |
11571 | { |
11572 | at::AutoDispatchSkipFunctionalize guard; |
11573 | tmp_output = at::_ops::put::call(self_, index_, source_, accumulate); |
11574 | } |
11575 | at::functionalization::impl::replace_(out, tmp_output); |
11576 | at::functionalization::impl::commit_update(out); |
11577 | at::functionalization::impl::sync(out); |
11578 | return out; |
11579 | } |
11580 | } |
11581 | |
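    // NOTE [generated in-place functionalization kernels]
    // In-place kernels (put_, index_add_, index_fill_, scatter_, ...) differ
    // from the out= kernels above in two ways: the meta-tensor shape check
    // actually runs (a constant `if (true)` guard rather than `if (false)`),
    // and the functional result is committed back into `self` instead of a
    // separate `out` argument.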
11582 | at::Tensor & put_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) { |
11583 | if (true) { |
11584 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11585 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11587 | auto self_meta = to_meta(self); |
11588 | auto index_meta = to_meta(index); |
11589 | auto source_meta = to_meta(source); |
11590 | at::AutoDispatchSkipFunctionalize func_guard; |
11591 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11592 | at::_ops::put_::call(self_meta, index_meta, source_meta, accumulate); |
11593 | } |
11594 | |
11595 | at::Tensor self_; |
11596 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11597 | at::functionalization::impl::sync(self); |
11598 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11599 | } else { |
11600 | self_ = self; |
11601 | } |
11602 | |
11603 | at::Tensor index_; |
11604 | if (at::functionalization::impl::isFunctionalTensor(index)) { |
11605 | at::functionalization::impl::sync(index); |
11606 | index_ = at::functionalization::impl::from_functional_tensor(index); |
11607 | } else { |
11608 | index_ = index; |
11609 | } |
11610 | |
11611 | at::Tensor source_; |
11612 | if (at::functionalization::impl::isFunctionalTensor(source)) { |
11613 | at::functionalization::impl::sync(source); |
11614 | source_ = at::functionalization::impl::from_functional_tensor(source); |
11615 | } else { |
11616 | source_ = source; |
11617 | } |
11618 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
11619 | if ((false || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(source))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
11624 | } else { |
11625 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11626 | at::AutoDispatchSkipFunctionalize guard; |
11627 | at::Tensor tmp_output = at::_ops::put_::call(self_, index_, source_, accumulate); |
          return self;
11629 | } |
11630 | } else { |
11631 | at::Tensor tmp_output; |
11632 | { |
11633 | at::AutoDispatchSkipFunctionalize guard; |
11634 | tmp_output = at::_ops::put::call(self_, index_, source_, accumulate); |
11635 | } |
11636 | at::functionalization::impl::replace_(self, tmp_output); |
11637 | at::functionalization::impl::commit_update(self); |
11638 | at::functionalization::impl::sync(self); |
11639 | return self; |
11640 | } |
11641 | } |
11642 | |
11643 | at::Tensor & index_add_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, at::Tensor & out) { |
11644 | if (false) { |
11645 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11646 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11648 | auto self_meta = to_meta(self); |
11649 | auto index_meta = to_meta(index); |
11650 | auto source_meta = to_meta(source); |
11651 | auto out_meta = to_meta(out); |
11652 | at::AutoDispatchSkipFunctionalize func_guard; |
11653 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11654 | at::_ops::index_add_out::call(self_meta, dim, index_meta, source_meta, alpha, out_meta); |
11655 | } |
11656 | |
11657 | at::Tensor self_; |
11658 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11659 | at::functionalization::impl::sync(self); |
11660 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11661 | } else { |
11662 | self_ = self; |
11663 | } |
11664 | |
11665 | at::Tensor index_; |
11666 | if (at::functionalization::impl::isFunctionalTensor(index)) { |
11667 | at::functionalization::impl::sync(index); |
11668 | index_ = at::functionalization::impl::from_functional_tensor(index); |
11669 | } else { |
11670 | index_ = index; |
11671 | } |
11672 | |
11673 | at::Tensor source_; |
11674 | if (at::functionalization::impl::isFunctionalTensor(source)) { |
11675 | at::functionalization::impl::sync(source); |
11676 | source_ = at::functionalization::impl::from_functional_tensor(source); |
11677 | } else { |
11678 | source_ = source; |
11679 | } |
11680 | |
11681 | at::Tensor out_; |
11682 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
11683 | at::functionalization::impl::sync(out); |
11684 | out_ = at::functionalization::impl::from_functional_tensor(out); |
11685 | } else { |
11686 | out_ = out; |
11687 | } |
11688 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
11689 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(source))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
11694 | } else { |
11695 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11696 | at::AutoDispatchSkipFunctionalize guard; |
11697 | at::Tensor tmp_output = at::_ops::index_add_out::call(self_, dim, index_, source_, alpha, out_); |
          return out;
11699 | } |
11700 | } else { |
11701 | at::Tensor tmp_output; |
11702 | { |
11703 | at::AutoDispatchSkipFunctionalize guard; |
11704 | tmp_output = at::_ops::index_add::call(self_, dim, index_, source_, alpha); |
11705 | } |
11706 | at::functionalization::impl::replace_(out, tmp_output); |
11707 | at::functionalization::impl::commit_update(out); |
11708 | at::functionalization::impl::sync(out); |
11709 | return out; |
11710 | } |
11711 | } |
11712 | |
11713 | at::Tensor & index_add_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) { |
11714 | if (true) { |
11715 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11716 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11718 | auto self_meta = to_meta(self); |
11719 | auto index_meta = to_meta(index); |
11720 | auto source_meta = to_meta(source); |
11721 | at::AutoDispatchSkipFunctionalize func_guard; |
11722 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11723 | at::_ops::index_add_::call(self_meta, dim, index_meta, source_meta, alpha); |
11724 | } |
11725 | |
11726 | at::Tensor self_; |
11727 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11728 | at::functionalization::impl::sync(self); |
11729 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11730 | } else { |
11731 | self_ = self; |
11732 | } |
11733 | |
11734 | at::Tensor index_; |
11735 | if (at::functionalization::impl::isFunctionalTensor(index)) { |
11736 | at::functionalization::impl::sync(index); |
11737 | index_ = at::functionalization::impl::from_functional_tensor(index); |
11738 | } else { |
11739 | index_ = index; |
11740 | } |
11741 | |
11742 | at::Tensor source_; |
11743 | if (at::functionalization::impl::isFunctionalTensor(source)) { |
11744 | at::functionalization::impl::sync(source); |
11745 | source_ = at::functionalization::impl::from_functional_tensor(source); |
11746 | } else { |
11747 | source_ = source; |
11748 | } |
11749 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
11750 | if ((false || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(source))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
11755 | } else { |
11756 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11757 | at::AutoDispatchSkipFunctionalize guard; |
11758 | at::Tensor tmp_output = at::_ops::index_add_::call(self_, dim, index_, source_, alpha); |
          return self;
11760 | } |
11761 | } else { |
11762 | at::Tensor tmp_output; |
11763 | { |
11764 | at::AutoDispatchSkipFunctionalize guard; |
11765 | tmp_output = at::_ops::index_add::call(self_, dim, index_, source_, alpha); |
11766 | } |
11767 | at::functionalization::impl::replace_(self, tmp_output); |
11768 | at::functionalization::impl::commit_update(self); |
11769 | at::functionalization::impl::sync(self); |
11770 | return self; |
11771 | } |
11772 | } |
11773 | |
11774 | at::Tensor & index_fill_out_int_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out) { |
11775 | if (false) { |
11776 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11777 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11779 | auto self_meta = to_meta(self); |
11780 | auto index_meta = to_meta(index); |
11781 | auto out_meta = to_meta(out); |
11782 | at::AutoDispatchSkipFunctionalize func_guard; |
11783 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11784 | at::_ops::index_fill_int_Scalar_out::call(self_meta, dim, index_meta, value, out_meta); |
11785 | } |
11786 | |
11787 | at::Tensor self_; |
11788 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11789 | at::functionalization::impl::sync(self); |
11790 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11791 | } else { |
11792 | self_ = self; |
11793 | } |
11794 | |
11795 | at::Tensor index_; |
11796 | if (at::functionalization::impl::isFunctionalTensor(index)) { |
11797 | at::functionalization::impl::sync(index); |
11798 | index_ = at::functionalization::impl::from_functional_tensor(index); |
11799 | } else { |
11800 | index_ = index; |
11801 | } |
11802 | |
11803 | at::Tensor out_; |
11804 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
11805 | at::functionalization::impl::sync(out); |
11806 | out_ = at::functionalization::impl::from_functional_tensor(out); |
11807 | } else { |
11808 | out_ = out; |
11809 | } |
11810 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
11811 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
11816 | } else { |
11817 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11818 | at::AutoDispatchSkipFunctionalize guard; |
11819 | at::Tensor tmp_output = at::_ops::index_fill_int_Scalar_out::call(self_, dim, index_, value, out_); |
          return out;
11821 | } |
11822 | } else { |
11823 | at::Tensor tmp_output; |
11824 | { |
11825 | at::AutoDispatchSkipFunctionalize guard; |
11826 | tmp_output = at::_ops::index_fill_int_Scalar::call(self_, dim, index_, value); |
11827 | } |
11828 | at::functionalization::impl::replace_(out, tmp_output); |
11829 | at::functionalization::impl::commit_update(out); |
11830 | at::functionalization::impl::sync(out); |
11831 | return out; |
11832 | } |
11833 | } |
11834 | |
11835 | at::Tensor & index_fill__int_Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) { |
11836 | if (true) { |
11837 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11838 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11840 | auto self_meta = to_meta(self); |
11841 | auto index_meta = to_meta(index); |
11842 | at::AutoDispatchSkipFunctionalize func_guard; |
11843 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11844 | at::_ops::index_fill__int_Scalar::call(self_meta, dim, index_meta, value); |
11845 | } |
11846 | |
11847 | at::Tensor self_; |
11848 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11849 | at::functionalization::impl::sync(self); |
11850 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11851 | } else { |
11852 | self_ = self; |
11853 | } |
11854 | |
11855 | at::Tensor index_; |
11856 | if (at::functionalization::impl::isFunctionalTensor(index)) { |
11857 | at::functionalization::impl::sync(index); |
11858 | index_ = at::functionalization::impl::from_functional_tensor(index); |
11859 | } else { |
11860 | index_ = index; |
11861 | } |
11862 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
11863 | if ((false || at::functionalization::impl::isFunctionalTensor(index))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
11868 | } else { |
11869 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11870 | at::AutoDispatchSkipFunctionalize guard; |
11871 | at::Tensor tmp_output = at::_ops::index_fill__int_Scalar::call(self_, dim, index_, value); |
          return self;
11873 | } |
11874 | } else { |
11875 | at::Tensor tmp_output; |
11876 | { |
11877 | at::AutoDispatchSkipFunctionalize guard; |
11878 | tmp_output = at::_ops::index_fill_int_Scalar::call(self_, dim, index_, value); |
11879 | } |
11880 | at::functionalization::impl::replace_(self, tmp_output); |
11881 | at::functionalization::impl::commit_update(self); |
11882 | at::functionalization::impl::sync(self); |
11883 | return self; |
11884 | } |
11885 | } |
11886 | |
11887 | at::Tensor & index_fill_out_int_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value, at::Tensor & out) { |
11888 | if (false) { |
11889 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11890 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11892 | auto self_meta = to_meta(self); |
11893 | auto index_meta = to_meta(index); |
11894 | auto value_meta = to_meta(value); |
11895 | auto out_meta = to_meta(out); |
11896 | at::AutoDispatchSkipFunctionalize func_guard; |
11897 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11898 | at::_ops::index_fill_int_Tensor_out::call(self_meta, dim, index_meta, value_meta, out_meta); |
11899 | } |
11900 | |
11901 | at::Tensor self_; |
11902 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11903 | at::functionalization::impl::sync(self); |
11904 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11905 | } else { |
11906 | self_ = self; |
11907 | } |
11908 | |
11909 | at::Tensor index_; |
11910 | if (at::functionalization::impl::isFunctionalTensor(index)) { |
11911 | at::functionalization::impl::sync(index); |
11912 | index_ = at::functionalization::impl::from_functional_tensor(index); |
11913 | } else { |
11914 | index_ = index; |
11915 | } |
11916 | |
11917 | at::Tensor value_; |
11918 | if (at::functionalization::impl::isFunctionalTensor(value)) { |
11919 | at::functionalization::impl::sync(value); |
11920 | value_ = at::functionalization::impl::from_functional_tensor(value); |
11921 | } else { |
11922 | value_ = value; |
11923 | } |
11924 | |
11925 | at::Tensor out_; |
11926 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
11927 | at::functionalization::impl::sync(out); |
11928 | out_ = at::functionalization::impl::from_functional_tensor(out); |
11929 | } else { |
11930 | out_ = out; |
11931 | } |
11932 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
11933 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(value))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
11938 | } else { |
11939 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11940 | at::AutoDispatchSkipFunctionalize guard; |
11941 | at::Tensor tmp_output = at::_ops::index_fill_int_Tensor_out::call(self_, dim, index_, value_, out_); |
          return out;
11943 | } |
11944 | } else { |
11945 | at::Tensor tmp_output; |
11946 | { |
11947 | at::AutoDispatchSkipFunctionalize guard; |
11948 | tmp_output = at::_ops::index_fill_int_Tensor::call(self_, dim, index_, value_); |
11949 | } |
11950 | at::functionalization::impl::replace_(out, tmp_output); |
11951 | at::functionalization::impl::commit_update(out); |
11952 | at::functionalization::impl::sync(out); |
11953 | return out; |
11954 | } |
11955 | } |
11956 | |
11957 | at::Tensor & index_fill__int_Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) { |
11958 | if (true) { |
11959 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11960 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11962 | auto self_meta = to_meta(self); |
11963 | auto index_meta = to_meta(index); |
11964 | auto value_meta = to_meta(value); |
11965 | at::AutoDispatchSkipFunctionalize func_guard; |
11966 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11967 | at::_ops::index_fill__int_Tensor::call(self_meta, dim, index_meta, value_meta); |
11968 | } |
11969 | |
11970 | at::Tensor self_; |
11971 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11972 | at::functionalization::impl::sync(self); |
11973 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11974 | } else { |
11975 | self_ = self; |
11976 | } |
11977 | |
11978 | at::Tensor index_; |
11979 | if (at::functionalization::impl::isFunctionalTensor(index)) { |
11980 | at::functionalization::impl::sync(index); |
11981 | index_ = at::functionalization::impl::from_functional_tensor(index); |
11982 | } else { |
11983 | index_ = index; |
11984 | } |
11985 | |
11986 | at::Tensor value_; |
11987 | if (at::functionalization::impl::isFunctionalTensor(value)) { |
11988 | at::functionalization::impl::sync(value); |
11989 | value_ = at::functionalization::impl::from_functional_tensor(value); |
11990 | } else { |
11991 | value_ = value; |
11992 | } |
11993 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
11994 | if ((false || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(value))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
11999 | } else { |
12000 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12001 | at::AutoDispatchSkipFunctionalize guard; |
12002 | at::Tensor tmp_output = at::_ops::index_fill__int_Tensor::call(self_, dim, index_, value_); |
          return self;
12004 | } |
12005 | } else { |
12006 | at::Tensor tmp_output; |
12007 | { |
12008 | at::AutoDispatchSkipFunctionalize guard; |
12009 | tmp_output = at::_ops::index_fill_int_Tensor::call(self_, dim, index_, value_); |
12010 | } |
12011 | at::functionalization::impl::replace_(self, tmp_output); |
12012 | at::functionalization::impl::commit_update(self); |
12013 | at::functionalization::impl::sync(self); |
12014 | return self; |
12015 | } |
12016 | } |
12017 | |
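    // scatter carries several schema overloads (src, value, reduce, ...); each
    // overload gets its own generated out= and in-place kernel below, differing
    // only in the at::_ops entry it redispatches to.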
12018 | at::Tensor & scatter_out_src_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out) { |
12019 | if (false) { |
12020 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12021 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12023 | auto self_meta = to_meta(self); |
12024 | auto index_meta = to_meta(index); |
12025 | auto src_meta = to_meta(src); |
12026 | auto out_meta = to_meta(out); |
12027 | at::AutoDispatchSkipFunctionalize func_guard; |
12028 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12029 | at::_ops::scatter_src_out::call(self_meta, dim, index_meta, src_meta, out_meta); |
12030 | } |
12031 | |
12032 | at::Tensor self_; |
12033 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12034 | at::functionalization::impl::sync(self); |
12035 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12036 | } else { |
12037 | self_ = self; |
12038 | } |
12039 | |
12040 | at::Tensor index_; |
12041 | if (at::functionalization::impl::isFunctionalTensor(index)) { |
12042 | at::functionalization::impl::sync(index); |
12043 | index_ = at::functionalization::impl::from_functional_tensor(index); |
12044 | } else { |
12045 | index_ = index; |
12046 | } |
12047 | |
12048 | at::Tensor src_; |
12049 | if (at::functionalization::impl::isFunctionalTensor(src)) { |
12050 | at::functionalization::impl::sync(src); |
12051 | src_ = at::functionalization::impl::from_functional_tensor(src); |
12052 | } else { |
12053 | src_ = src; |
12054 | } |
12055 | |
12056 | at::Tensor out_; |
12057 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
12058 | at::functionalization::impl::sync(out); |
12059 | out_ = at::functionalization::impl::from_functional_tensor(out); |
12060 | } else { |
12061 | out_ = out; |
12062 | } |
12063 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
12064 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(src))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
12069 | } else { |
12070 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12071 | at::AutoDispatchSkipFunctionalize guard; |
12072 | at::Tensor tmp_output = at::_ops::scatter_src_out::call(self_, dim, index_, src_, out_); |
          return out;
12074 | } |
12075 | } else { |
12076 | at::Tensor tmp_output; |
12077 | { |
12078 | at::AutoDispatchSkipFunctionalize guard; |
12079 | tmp_output = at::_ops::scatter_src::call(self_, dim, index_, src_); |
12080 | } |
12081 | at::functionalization::impl::replace_(out, tmp_output); |
12082 | at::functionalization::impl::commit_update(out); |
12083 | at::functionalization::impl::sync(out); |
12084 | return out; |
12085 | } |
12086 | } |
12087 | |
12088 | at::Tensor & scatter__src(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) { |
12089 | if (true) { |
12090 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12091 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12093 | auto self_meta = to_meta(self); |
12094 | auto index_meta = to_meta(index); |
12095 | auto src_meta = to_meta(src); |
12096 | at::AutoDispatchSkipFunctionalize func_guard; |
12097 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12098 | at::_ops::scatter__src::call(self_meta, dim, index_meta, src_meta); |
12099 | } |
12100 | |
12101 | at::Tensor self_; |
12102 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12103 | at::functionalization::impl::sync(self); |
12104 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12105 | } else { |
12106 | self_ = self; |
12107 | } |
12108 | |
12109 | at::Tensor index_; |
12110 | if (at::functionalization::impl::isFunctionalTensor(index)) { |
12111 | at::functionalization::impl::sync(index); |
12112 | index_ = at::functionalization::impl::from_functional_tensor(index); |
12113 | } else { |
12114 | index_ = index; |
12115 | } |
12116 | |
12117 | at::Tensor src_; |
12118 | if (at::functionalization::impl::isFunctionalTensor(src)) { |
12119 | at::functionalization::impl::sync(src); |
12120 | src_ = at::functionalization::impl::from_functional_tensor(src); |
12121 | } else { |
12122 | src_ = src; |
12123 | } |
12124 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
12125 | if ((false || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(src))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
12130 | } else { |
12131 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12132 | at::AutoDispatchSkipFunctionalize guard; |
12133 | at::Tensor tmp_output = at::_ops::scatter__src::call(self_, dim, index_, src_); |
          return self;
12135 | } |
12136 | } else { |
12137 | at::Tensor tmp_output; |
12138 | { |
12139 | at::AutoDispatchSkipFunctionalize guard; |
12140 | tmp_output = at::_ops::scatter_src::call(self_, dim, index_, src_); |
12141 | } |
12142 | at::functionalization::impl::replace_(self, tmp_output); |
12143 | at::functionalization::impl::commit_update(self); |
12144 | at::functionalization::impl::sync(self); |
12145 | return self; |
12146 | } |
12147 | } |
12148 | |
12149 | at::Tensor & scatter_out_value_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out) { |
12150 | if (false) { |
12151 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12152 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12154 | auto self_meta = to_meta(self); |
12155 | auto index_meta = to_meta(index); |
12156 | auto out_meta = to_meta(out); |
12157 | at::AutoDispatchSkipFunctionalize func_guard; |
12158 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12159 | at::_ops::scatter_value_out::call(self_meta, dim, index_meta, value, out_meta); |
12160 | } |
12161 | |
12162 | at::Tensor self_; |
12163 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12164 | at::functionalization::impl::sync(self); |
12165 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12166 | } else { |
12167 | self_ = self; |
12168 | } |
12169 | |
12170 | at::Tensor index_; |
12171 | if (at::functionalization::impl::isFunctionalTensor(index)) { |
12172 | at::functionalization::impl::sync(index); |
12173 | index_ = at::functionalization::impl::from_functional_tensor(index); |
12174 | } else { |
12175 | index_ = index; |
12176 | } |
12177 | |
12178 | at::Tensor out_; |
12179 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
12180 | at::functionalization::impl::sync(out); |
12181 | out_ = at::functionalization::impl::from_functional_tensor(out); |
12182 | } else { |
12183 | out_ = out; |
12184 | } |
12185 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
12186 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
12191 | } else { |
12192 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12193 | at::AutoDispatchSkipFunctionalize guard; |
12194 | at::Tensor tmp_output = at::_ops::scatter_value_out::call(self_, dim, index_, value, out_); |
          return out;
12196 | } |
12197 | } else { |
12198 | at::Tensor tmp_output; |
12199 | { |
12200 | at::AutoDispatchSkipFunctionalize guard; |
12201 | tmp_output = at::_ops::scatter_value::call(self_, dim, index_, value); |
12202 | } |
12203 | at::functionalization::impl::replace_(out, tmp_output); |
12204 | at::functionalization::impl::commit_update(out); |
12205 | at::functionalization::impl::sync(out); |
12206 | return out; |
12207 | } |
12208 | } |
12209 | |
12210 | at::Tensor & scatter__value(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) { |
12211 | if (true) { |
12212 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12213 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12215 | auto self_meta = to_meta(self); |
12216 | auto index_meta = to_meta(index); |
12217 | at::AutoDispatchSkipFunctionalize func_guard; |
12218 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12219 | at::_ops::scatter__value::call(self_meta, dim, index_meta, value); |
12220 | } |
12221 | |
12222 | at::Tensor self_; |
12223 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12224 | at::functionalization::impl::sync(self); |
12225 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12226 | } else { |
12227 | self_ = self; |
12228 | } |
12229 | |
12230 | at::Tensor index_; |
12231 | if (at::functionalization::impl::isFunctionalTensor(index)) { |
12232 | at::functionalization::impl::sync(index); |
12233 | index_ = at::functionalization::impl::from_functional_tensor(index); |
12234 | } else { |
12235 | index_ = index; |
12236 | } |
12237 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
12238 | if ((false || at::functionalization::impl::isFunctionalTensor(index))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
12243 | } else { |
12244 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12245 | at::AutoDispatchSkipFunctionalize guard; |
12246 | at::Tensor tmp_output = at::_ops::scatter__value::call(self_, dim, index_, value); |
          return self;
12248 | } |
12249 | } else { |
12250 | at::Tensor tmp_output; |
12251 | { |
12252 | at::AutoDispatchSkipFunctionalize guard; |
12253 | tmp_output = at::_ops::scatter_value::call(self_, dim, index_, value); |
12254 | } |
12255 | at::functionalization::impl::replace_(self, tmp_output); |
12256 | at::functionalization::impl::commit_update(self); |
12257 | at::functionalization::impl::sync(self); |
12258 | return self; |
12259 | } |
12260 | } |
12261 | |
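// Editorial note on the out= kernels that follow: they share the structure of
// the in-place kernels above, except that the meta-tensor shape pre-check is
// compiled out (the `if (false)` block), since only in-place ops are assumed
// to reliably support meta tensors today. In the fully functional branch, the
// functional op's result is committed into `out`'s wrapper rather than
// `self`'s.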
12262 | at::Tensor & scatter_out_reduce_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, at::Tensor & out) { |
12263 | if (false) { |
12264 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12265 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
12267 | auto self_meta = to_meta(self); |
12268 | auto index_meta = to_meta(index); |
12269 | auto src_meta = to_meta(src); |
12270 | auto out_meta = to_meta(out); |
12271 | at::AutoDispatchSkipFunctionalize func_guard; |
12272 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12273 | at::_ops::scatter_reduce_out::call(self_meta, dim, index_meta, src_meta, reduce, out_meta); |
12274 | } |
12275 | |
12276 | at::Tensor self_; |
12277 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12278 | at::functionalization::impl::sync(self); |
12279 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12280 | } else { |
12281 | self_ = self; |
12282 | } |
12283 | |
12284 | at::Tensor index_; |
12285 | if (at::functionalization::impl::isFunctionalTensor(index)) { |
12286 | at::functionalization::impl::sync(index); |
12287 | index_ = at::functionalization::impl::from_functional_tensor(index); |
12288 | } else { |
12289 | index_ = index; |
12290 | } |
12291 | |
12292 | at::Tensor src_; |
12293 | if (at::functionalization::impl::isFunctionalTensor(src)) { |
12294 | at::functionalization::impl::sync(src); |
12295 | src_ = at::functionalization::impl::from_functional_tensor(src); |
12296 | } else { |
12297 | src_ = src; |
12298 | } |
12299 | |
12300 | at::Tensor out_; |
12301 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
12302 | at::functionalization::impl::sync(out); |
12303 | out_ = at::functionalization::impl::from_functional_tensor(out); |
12304 | } else { |
12305 | out_ = out; |
12306 | } |
12307 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
12308 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(src))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside a functionalize() call.");
12313 | } else { |
12314 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12315 | at::AutoDispatchSkipFunctionalize guard; |
12316 | at::Tensor tmp_output = at::_ops::scatter_reduce_out::call(self_, dim, index_, src_, reduce, out_); |
      return out;
12318 | } |
12319 | } else { |
12320 | at::Tensor tmp_output; |
12321 | { |
12322 | at::AutoDispatchSkipFunctionalize guard; |
12323 | tmp_output = at::_ops::scatter_reduce::call(self_, dim, index_, src_, reduce); |
12324 | } |
12325 | at::functionalization::impl::replace_(out, tmp_output); |
12326 | at::functionalization::impl::commit_update(out); |
12327 | at::functionalization::impl::sync(out); |
12328 | return out; |
12329 | } |
12330 | } |
12331 | |
12332 | at::Tensor & scatter__reduce(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) { |
12333 | if (true) { |
12334 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12335 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
12337 | auto self_meta = to_meta(self); |
12338 | auto index_meta = to_meta(index); |
12339 | auto src_meta = to_meta(src); |
12340 | at::AutoDispatchSkipFunctionalize func_guard; |
12341 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12342 | at::_ops::scatter__reduce::call(self_meta, dim, index_meta, src_meta, reduce); |
12343 | } |
12344 | |
12345 | at::Tensor self_; |
12346 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12347 | at::functionalization::impl::sync(self); |
12348 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12349 | } else { |
12350 | self_ = self; |
12351 | } |
12352 | |
12353 | at::Tensor index_; |
12354 | if (at::functionalization::impl::isFunctionalTensor(index)) { |
12355 | at::functionalization::impl::sync(index); |
12356 | index_ = at::functionalization::impl::from_functional_tensor(index); |
12357 | } else { |
12358 | index_ = index; |
12359 | } |
12360 | |
12361 | at::Tensor src_; |
12362 | if (at::functionalization::impl::isFunctionalTensor(src)) { |
12363 | at::functionalization::impl::sync(src); |
12364 | src_ = at::functionalization::impl::from_functional_tensor(src); |
12365 | } else { |
12366 | src_ = src; |
12367 | } |
12368 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
12369 | if ((false || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(src))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside a functionalize() call.");
12374 | } else { |
12375 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12376 | at::AutoDispatchSkipFunctionalize guard; |
12377 | at::Tensor tmp_output = at::_ops::scatter__reduce::call(self_, dim, index_, src_, reduce); |
      return self;
12379 | } |
12380 | } else { |
12381 | at::Tensor tmp_output; |
12382 | { |
12383 | at::AutoDispatchSkipFunctionalize guard; |
12384 | tmp_output = at::_ops::scatter_reduce::call(self_, dim, index_, src_, reduce); |
12385 | } |
12386 | at::functionalization::impl::replace_(self, tmp_output); |
12387 | at::functionalization::impl::commit_update(self); |
12388 | at::functionalization::impl::sync(self); |
12389 | return self; |
12390 | } |
12391 | } |
12392 | |
12393 | at::Tensor & scatter_out_value_reduce_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce, at::Tensor & out) { |
12394 | if (false) { |
12395 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12396 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
12398 | auto self_meta = to_meta(self); |
12399 | auto index_meta = to_meta(index); |
12400 | auto out_meta = to_meta(out); |
12401 | at::AutoDispatchSkipFunctionalize func_guard; |
12402 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12403 | at::_ops::scatter_value_reduce_out::call(self_meta, dim, index_meta, value, reduce, out_meta); |
12404 | } |
12405 | |
12406 | at::Tensor self_; |
12407 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12408 | at::functionalization::impl::sync(self); |
12409 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12410 | } else { |
12411 | self_ = self; |
12412 | } |
12413 | |
12414 | at::Tensor index_; |
12415 | if (at::functionalization::impl::isFunctionalTensor(index)) { |
12416 | at::functionalization::impl::sync(index); |
12417 | index_ = at::functionalization::impl::from_functional_tensor(index); |
12418 | } else { |
12419 | index_ = index; |
12420 | } |
12421 | |
12422 | at::Tensor out_; |
12423 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
12424 | at::functionalization::impl::sync(out); |
12425 | out_ = at::functionalization::impl::from_functional_tensor(out); |
12426 | } else { |
12427 | out_ = out; |
12428 | } |
12429 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
12430 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside a functionalize() call.");
12435 | } else { |
12436 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12437 | at::AutoDispatchSkipFunctionalize guard; |
12438 | at::Tensor tmp_output = at::_ops::scatter_value_reduce_out::call(self_, dim, index_, value, reduce, out_); |
      return out;
12440 | } |
12441 | } else { |
12442 | at::Tensor tmp_output; |
12443 | { |
12444 | at::AutoDispatchSkipFunctionalize guard; |
12445 | tmp_output = at::_ops::scatter_value_reduce::call(self_, dim, index_, value, reduce); |
12446 | } |
12447 | at::functionalization::impl::replace_(out, tmp_output); |
12448 | at::functionalization::impl::commit_update(out); |
12449 | at::functionalization::impl::sync(out); |
12450 | return out; |
12451 | } |
12452 | } |
12453 | |
12454 | at::Tensor & scatter__value_reduce(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) { |
12455 | if (true) { |
12456 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12457 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
12459 | auto self_meta = to_meta(self); |
12460 | auto index_meta = to_meta(index); |
12461 | at::AutoDispatchSkipFunctionalize func_guard; |
12462 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12463 | at::_ops::scatter__value_reduce::call(self_meta, dim, index_meta, value, reduce); |
12464 | } |
12465 | |
12466 | at::Tensor self_; |
12467 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12468 | at::functionalization::impl::sync(self); |
12469 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12470 | } else { |
12471 | self_ = self; |
12472 | } |
12473 | |
12474 | at::Tensor index_; |
12475 | if (at::functionalization::impl::isFunctionalTensor(index)) { |
12476 | at::functionalization::impl::sync(index); |
12477 | index_ = at::functionalization::impl::from_functional_tensor(index); |
12478 | } else { |
12479 | index_ = index; |
12480 | } |
12481 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
12482 | if ((false || at::functionalization::impl::isFunctionalTensor(index))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside a functionalize() call.");
12487 | } else { |
12488 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12489 | at::AutoDispatchSkipFunctionalize guard; |
12490 | at::Tensor tmp_output = at::_ops::scatter__value_reduce::call(self_, dim, index_, value, reduce); |
      return self;
12492 | } |
12493 | } else { |
12494 | at::Tensor tmp_output; |
12495 | { |
12496 | at::AutoDispatchSkipFunctionalize guard; |
12497 | tmp_output = at::_ops::scatter_value_reduce::call(self_, dim, index_, value, reduce); |
12498 | } |
12499 | at::functionalization::impl::replace_(self, tmp_output); |
12500 | at::functionalization::impl::commit_update(self); |
12501 | at::functionalization::impl::sync(self); |
12502 | return self; |
12503 | } |
12504 | } |
12505 | |
12506 | at::Tensor & scatter_add_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out) { |
12507 | if (false) { |
12508 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12509 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
12511 | auto self_meta = to_meta(self); |
12512 | auto index_meta = to_meta(index); |
12513 | auto src_meta = to_meta(src); |
12514 | auto out_meta = to_meta(out); |
12515 | at::AutoDispatchSkipFunctionalize func_guard; |
12516 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12517 | at::_ops::scatter_add_out::call(self_meta, dim, index_meta, src_meta, out_meta); |
12518 | } |
12519 | |
12520 | at::Tensor self_; |
12521 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12522 | at::functionalization::impl::sync(self); |
12523 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12524 | } else { |
12525 | self_ = self; |
12526 | } |
12527 | |
12528 | at::Tensor index_; |
12529 | if (at::functionalization::impl::isFunctionalTensor(index)) { |
12530 | at::functionalization::impl::sync(index); |
12531 | index_ = at::functionalization::impl::from_functional_tensor(index); |
12532 | } else { |
12533 | index_ = index; |
12534 | } |
12535 | |
12536 | at::Tensor src_; |
12537 | if (at::functionalization::impl::isFunctionalTensor(src)) { |
12538 | at::functionalization::impl::sync(src); |
12539 | src_ = at::functionalization::impl::from_functional_tensor(src); |
12540 | } else { |
12541 | src_ = src; |
12542 | } |
12543 | |
12544 | at::Tensor out_; |
12545 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
12546 | at::functionalization::impl::sync(out); |
12547 | out_ = at::functionalization::impl::from_functional_tensor(out); |
12548 | } else { |
12549 | out_ = out; |
12550 | } |
12551 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
12552 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(src))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside a functionalize() call.");
12557 | } else { |
12558 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12559 | at::AutoDispatchSkipFunctionalize guard; |
12560 | at::Tensor tmp_output = at::_ops::scatter_add_out::call(self_, dim, index_, src_, out_); |
      return out;
12562 | } |
12563 | } else { |
12564 | at::Tensor tmp_output; |
12565 | { |
12566 | at::AutoDispatchSkipFunctionalize guard; |
12567 | tmp_output = at::_ops::scatter_add::call(self_, dim, index_, src_); |
12568 | } |
12569 | at::functionalization::impl::replace_(out, tmp_output); |
12570 | at::functionalization::impl::commit_update(out); |
12571 | at::functionalization::impl::sync(out); |
12572 | return out; |
12573 | } |
12574 | } |
12575 | |
12576 | at::Tensor & scatter_add_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) { |
12577 | if (true) { |
12578 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12579 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
12581 | auto self_meta = to_meta(self); |
12582 | auto index_meta = to_meta(index); |
12583 | auto src_meta = to_meta(src); |
12584 | at::AutoDispatchSkipFunctionalize func_guard; |
12585 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12586 | at::_ops::scatter_add_::call(self_meta, dim, index_meta, src_meta); |
12587 | } |
12588 | |
12589 | at::Tensor self_; |
12590 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12591 | at::functionalization::impl::sync(self); |
12592 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12593 | } else { |
12594 | self_ = self; |
12595 | } |
12596 | |
12597 | at::Tensor index_; |
12598 | if (at::functionalization::impl::isFunctionalTensor(index)) { |
12599 | at::functionalization::impl::sync(index); |
12600 | index_ = at::functionalization::impl::from_functional_tensor(index); |
12601 | } else { |
12602 | index_ = index; |
12603 | } |
12604 | |
12605 | at::Tensor src_; |
12606 | if (at::functionalization::impl::isFunctionalTensor(src)) { |
12607 | at::functionalization::impl::sync(src); |
12608 | src_ = at::functionalization::impl::from_functional_tensor(src); |
12609 | } else { |
12610 | src_ = src; |
12611 | } |
12612 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
12613 | if ((false || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(src))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside a functionalize() call.");
12618 | } else { |
12619 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12620 | at::AutoDispatchSkipFunctionalize guard; |
12621 | at::Tensor tmp_output = at::_ops::scatter_add_::call(self_, dim, index_, src_); |
      return self;
12623 | } |
12624 | } else { |
12625 | at::Tensor tmp_output; |
12626 | { |
12627 | at::AutoDispatchSkipFunctionalize guard; |
12628 | tmp_output = at::_ops::scatter_add::call(self_, dim, index_, src_); |
12629 | } |
12630 | at::functionalization::impl::replace_(self, tmp_output); |
12631 | at::functionalization::impl::commit_update(self); |
12632 | at::functionalization::impl::sync(self); |
12633 | return self; |
12634 | } |
12635 | } |
12636 | |
12637 | at::Tensor & __lshift___out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
12638 | if (false) { |
12639 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12640 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
12642 | auto self_meta = to_meta(self); |
12643 | auto out_meta = to_meta(out); |
12644 | at::AutoDispatchSkipFunctionalize func_guard; |
12645 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12646 | at::_ops::__lshift___Scalar_out::call(self_meta, other, out_meta); |
12647 | } |
12648 | |
12649 | at::Tensor self_; |
12650 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12651 | at::functionalization::impl::sync(self); |
12652 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12653 | } else { |
12654 | self_ = self; |
12655 | } |
12656 | |
12657 | at::Tensor out_; |
12658 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
12659 | at::functionalization::impl::sync(out); |
12660 | out_ = at::functionalization::impl::from_functional_tensor(out); |
12661 | } else { |
12662 | out_ = out; |
12663 | } |
12664 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
12665 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside a functionalize() call.");
12670 | } else { |
12671 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12672 | at::AutoDispatchSkipFunctionalize guard; |
12673 | at::Tensor tmp_output = at::_ops::__lshift___Scalar_out::call(self_, other, out_); |
      return out;
12675 | } |
12676 | } else { |
12677 | at::Tensor tmp_output; |
12678 | { |
12679 | at::AutoDispatchSkipFunctionalize guard; |
12680 | tmp_output = at::_ops::__lshift___Scalar::call(self_, other); |
12681 | } |
12682 | at::functionalization::impl::replace_(out, tmp_output); |
12683 | at::functionalization::impl::commit_update(out); |
12684 | at::functionalization::impl::sync(out); |
12685 | return out; |
12686 | } |
12687 | } |
12688 | |
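// Editorial note: compound-assignment operators such as __ilshift__ have no
// dedicated functional op, so their functional branch below reuses the
// corresponding base op (__lshift__) and commits its result back into `self`.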
12689 | at::Tensor & __ilshift___Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { |
12690 | if (true) { |
12691 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12692 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
12694 | auto self_meta = to_meta(self); |
12695 | at::AutoDispatchSkipFunctionalize func_guard; |
12696 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12697 | at::_ops::__ilshift___Scalar::call(self_meta, other); |
12698 | } |
12699 | |
12700 | at::Tensor self_; |
12701 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12702 | at::functionalization::impl::sync(self); |
12703 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12704 | } else { |
12705 | self_ = self; |
12706 | } |
12707 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
12708 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside a functionalize() call.");
12713 | } else { |
12714 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12715 | at::AutoDispatchSkipFunctionalize guard; |
12716 | at::Tensor tmp_output = at::_ops::__ilshift___Scalar::call(self_, other); |
      return self;
12718 | } |
12719 | } else { |
12720 | at::Tensor tmp_output; |
12721 | { |
12722 | at::AutoDispatchSkipFunctionalize guard; |
12723 | tmp_output = at::_ops::__lshift___Scalar::call(self_, other); |
12724 | } |
12725 | at::functionalization::impl::replace_(self, tmp_output); |
12726 | at::functionalization::impl::commit_update(self); |
12727 | at::functionalization::impl::sync(self); |
12728 | return self; |
12729 | } |
12730 | } |
12731 | |
12732 | at::Tensor & __lshift___out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
12733 | if (false) { |
12734 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12735 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
12737 | auto self_meta = to_meta(self); |
12738 | auto other_meta = to_meta(other); |
12739 | auto out_meta = to_meta(out); |
12740 | at::AutoDispatchSkipFunctionalize func_guard; |
12741 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12742 | at::_ops::__lshift___Tensor_out::call(self_meta, other_meta, out_meta); |
12743 | } |
12744 | |
12745 | at::Tensor self_; |
12746 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12747 | at::functionalization::impl::sync(self); |
12748 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12749 | } else { |
12750 | self_ = self; |
12751 | } |
12752 | |
12753 | at::Tensor other_; |
12754 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
12755 | at::functionalization::impl::sync(other); |
12756 | other_ = at::functionalization::impl::from_functional_tensor(other); |
12757 | } else { |
12758 | other_ = other; |
12759 | } |
12760 | |
12761 | at::Tensor out_; |
12762 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
12763 | at::functionalization::impl::sync(out); |
12764 | out_ = at::functionalization::impl::from_functional_tensor(out); |
12765 | } else { |
12766 | out_ = out; |
12767 | } |
12768 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
12769 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside a functionalize() call.");
12774 | } else { |
12775 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12776 | at::AutoDispatchSkipFunctionalize guard; |
12777 | at::Tensor tmp_output = at::_ops::__lshift___Tensor_out::call(self_, other_, out_); |
      return out;
12779 | } |
12780 | } else { |
12781 | at::Tensor tmp_output; |
12782 | { |
12783 | at::AutoDispatchSkipFunctionalize guard; |
12784 | tmp_output = at::_ops::__lshift___Tensor::call(self_, other_); |
12785 | } |
12786 | at::functionalization::impl::replace_(out, tmp_output); |
12787 | at::functionalization::impl::commit_update(out); |
12788 | at::functionalization::impl::sync(out); |
12789 | return out; |
12790 | } |
12791 | } |
12792 | |
12793 | at::Tensor & __ilshift___Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { |
12794 | if (true) { |
12795 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12796 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
12798 | auto self_meta = to_meta(self); |
12799 | auto other_meta = to_meta(other); |
12800 | at::AutoDispatchSkipFunctionalize func_guard; |
12801 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12802 | at::_ops::__ilshift___Tensor::call(self_meta, other_meta); |
12803 | } |
12804 | |
12805 | at::Tensor self_; |
12806 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12807 | at::functionalization::impl::sync(self); |
12808 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12809 | } else { |
12810 | self_ = self; |
12811 | } |
12812 | |
12813 | at::Tensor other_; |
12814 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
12815 | at::functionalization::impl::sync(other); |
12816 | other_ = at::functionalization::impl::from_functional_tensor(other); |
12817 | } else { |
12818 | other_ = other; |
12819 | } |
12820 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
12821 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside a functionalize() call.");
12826 | } else { |
12827 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12828 | at::AutoDispatchSkipFunctionalize guard; |
12829 | at::Tensor tmp_output = at::_ops::__ilshift___Tensor::call(self_, other_); |
      return self;
12831 | } |
12832 | } else { |
12833 | at::Tensor tmp_output; |
12834 | { |
12835 | at::AutoDispatchSkipFunctionalize guard; |
12836 | tmp_output = at::_ops::__lshift___Tensor::call(self_, other_); |
12837 | } |
12838 | at::functionalization::impl::replace_(self, tmp_output); |
12839 | at::functionalization::impl::commit_update(self); |
12840 | at::functionalization::impl::sync(self); |
12841 | return self; |
12842 | } |
12843 | } |
12844 | |
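// Editorial note on the comparison kernels that follow (greater_equal,
// less_equal, greater): they use the same two patterns as above. In case 2,
// the AutoDispatchSkipFunctionalize guard masks the Functionalize dispatch
// key for the duration of the redispatch, so the inner call reaches the
// underlying kernel instead of re-entering these wrappers.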
12845 | at::Tensor & greater_equal_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
12846 | if (false) { |
12847 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12848 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
12850 | auto self_meta = to_meta(self); |
12851 | auto out_meta = to_meta(out); |
12852 | at::AutoDispatchSkipFunctionalize func_guard; |
12853 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12854 | at::_ops::greater_equal_Scalar_out::call(self_meta, other, out_meta); |
12855 | } |
12856 | |
12857 | at::Tensor self_; |
12858 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12859 | at::functionalization::impl::sync(self); |
12860 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12861 | } else { |
12862 | self_ = self; |
12863 | } |
12864 | |
12865 | at::Tensor out_; |
12866 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
12867 | at::functionalization::impl::sync(out); |
12868 | out_ = at::functionalization::impl::from_functional_tensor(out); |
12869 | } else { |
12870 | out_ = out; |
12871 | } |
12872 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
12873 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside a functionalize() call.");
12878 | } else { |
12879 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12880 | at::AutoDispatchSkipFunctionalize guard; |
12881 | at::Tensor tmp_output = at::_ops::greater_equal_Scalar_out::call(self_, other, out_); |
      return out;
12883 | } |
12884 | } else { |
12885 | at::Tensor tmp_output; |
12886 | { |
12887 | at::AutoDispatchSkipFunctionalize guard; |
12888 | tmp_output = at::_ops::greater_equal_Scalar::call(self_, other); |
12889 | } |
12890 | at::functionalization::impl::replace_(out, tmp_output); |
12891 | at::functionalization::impl::commit_update(out); |
12892 | at::functionalization::impl::sync(out); |
12893 | return out; |
12894 | } |
12895 | } |
12896 | |
12897 | at::Tensor & greater_equal__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { |
12898 | if (true) { |
12899 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12900 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
12902 | auto self_meta = to_meta(self); |
12903 | at::AutoDispatchSkipFunctionalize func_guard; |
12904 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12905 | at::_ops::greater_equal__Scalar::call(self_meta, other); |
12906 | } |
12907 | |
12908 | at::Tensor self_; |
12909 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12910 | at::functionalization::impl::sync(self); |
12911 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12912 | } else { |
12913 | self_ = self; |
12914 | } |
12915 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
12916 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside a functionalize() call.");
12921 | } else { |
12922 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12923 | at::AutoDispatchSkipFunctionalize guard; |
12924 | at::Tensor tmp_output = at::_ops::greater_equal__Scalar::call(self_, other); |
      return self;
12926 | } |
12927 | } else { |
12928 | at::Tensor tmp_output; |
12929 | { |
12930 | at::AutoDispatchSkipFunctionalize guard; |
12931 | tmp_output = at::_ops::greater_equal_Scalar::call(self_, other); |
12932 | } |
12933 | at::functionalization::impl::replace_(self, tmp_output); |
12934 | at::functionalization::impl::commit_update(self); |
12935 | at::functionalization::impl::sync(self); |
12936 | return self; |
12937 | } |
12938 | } |
12939 | |
12940 | at::Tensor & greater_equal_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
12941 | if (false) { |
12942 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12943 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
12945 | auto self_meta = to_meta(self); |
12946 | auto other_meta = to_meta(other); |
12947 | auto out_meta = to_meta(out); |
12948 | at::AutoDispatchSkipFunctionalize func_guard; |
12949 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12950 | at::_ops::greater_equal_Tensor_out::call(self_meta, other_meta, out_meta); |
12951 | } |
12952 | |
12953 | at::Tensor self_; |
12954 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12955 | at::functionalization::impl::sync(self); |
12956 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12957 | } else { |
12958 | self_ = self; |
12959 | } |
12960 | |
12961 | at::Tensor other_; |
12962 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
12963 | at::functionalization::impl::sync(other); |
12964 | other_ = at::functionalization::impl::from_functional_tensor(other); |
12965 | } else { |
12966 | other_ = other; |
12967 | } |
12968 | |
12969 | at::Tensor out_; |
12970 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
12971 | at::functionalization::impl::sync(out); |
12972 | out_ = at::functionalization::impl::from_functional_tensor(out); |
12973 | } else { |
12974 | out_ = out; |
12975 | } |
12976 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
12977 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside a functionalize() call.");
12982 | } else { |
12983 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12984 | at::AutoDispatchSkipFunctionalize guard; |
12985 | at::Tensor tmp_output = at::_ops::greater_equal_Tensor_out::call(self_, other_, out_); |
      return out;
12987 | } |
12988 | } else { |
12989 | at::Tensor tmp_output; |
12990 | { |
12991 | at::AutoDispatchSkipFunctionalize guard; |
12992 | tmp_output = at::_ops::greater_equal_Tensor::call(self_, other_); |
12993 | } |
12994 | at::functionalization::impl::replace_(out, tmp_output); |
12995 | at::functionalization::impl::commit_update(out); |
12996 | at::functionalization::impl::sync(out); |
12997 | return out; |
12998 | } |
12999 | } |
13000 | |
13001 | at::Tensor & greater_equal__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { |
13002 | if (true) { |
13003 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13004 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
13006 | auto self_meta = to_meta(self); |
13007 | auto other_meta = to_meta(other); |
13008 | at::AutoDispatchSkipFunctionalize func_guard; |
13009 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13010 | at::_ops::greater_equal__Tensor::call(self_meta, other_meta); |
13011 | } |
13012 | |
13013 | at::Tensor self_; |
13014 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13015 | at::functionalization::impl::sync(self); |
13016 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13017 | } else { |
13018 | self_ = self; |
13019 | } |
13020 | |
13021 | at::Tensor other_; |
13022 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
13023 | at::functionalization::impl::sync(other); |
13024 | other_ = at::functionalization::impl::from_functional_tensor(other); |
13025 | } else { |
13026 | other_ = other; |
13027 | } |
13028 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
13029 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside a functionalize() call.");
13034 | } else { |
13035 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13036 | at::AutoDispatchSkipFunctionalize guard; |
13037 | at::Tensor tmp_output = at::_ops::greater_equal__Tensor::call(self_, other_); |
      return self;
13039 | } |
13040 | } else { |
13041 | at::Tensor tmp_output; |
13042 | { |
13043 | at::AutoDispatchSkipFunctionalize guard; |
13044 | tmp_output = at::_ops::greater_equal_Tensor::call(self_, other_); |
13045 | } |
13046 | at::functionalization::impl::replace_(self, tmp_output); |
13047 | at::functionalization::impl::commit_update(self); |
13048 | at::functionalization::impl::sync(self); |
13049 | return self; |
13050 | } |
13051 | } |
13052 | |
13053 | at::Tensor & less_equal_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
13054 | if (false) { |
13055 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13056 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
13058 | auto self_meta = to_meta(self); |
13059 | auto out_meta = to_meta(out); |
13060 | at::AutoDispatchSkipFunctionalize func_guard; |
13061 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13062 | at::_ops::less_equal_Scalar_out::call(self_meta, other, out_meta); |
13063 | } |
13064 | |
13065 | at::Tensor self_; |
13066 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13067 | at::functionalization::impl::sync(self); |
13068 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13069 | } else { |
13070 | self_ = self; |
13071 | } |
13072 | |
13073 | at::Tensor out_; |
13074 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13075 | at::functionalization::impl::sync(out); |
13076 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13077 | } else { |
13078 | out_ = out; |
13079 | } |
13080 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13081 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside a functionalize() call.");
13086 | } else { |
13087 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13088 | at::AutoDispatchSkipFunctionalize guard; |
13089 | at::Tensor tmp_output = at::_ops::less_equal_Scalar_out::call(self_, other, out_); |
      return out;
13091 | } |
13092 | } else { |
13093 | at::Tensor tmp_output; |
13094 | { |
13095 | at::AutoDispatchSkipFunctionalize guard; |
13096 | tmp_output = at::_ops::less_equal_Scalar::call(self_, other); |
13097 | } |
13098 | at::functionalization::impl::replace_(out, tmp_output); |
13099 | at::functionalization::impl::commit_update(out); |
13100 | at::functionalization::impl::sync(out); |
13101 | return out; |
13102 | } |
13103 | } |
13104 | |
13105 | at::Tensor & less_equal__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { |
13106 | if (true) { |
13107 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13108 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
13110 | auto self_meta = to_meta(self); |
13111 | at::AutoDispatchSkipFunctionalize func_guard; |
13112 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13113 | at::_ops::less_equal__Scalar::call(self_meta, other); |
13114 | } |
13115 | |
13116 | at::Tensor self_; |
13117 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13118 | at::functionalization::impl::sync(self); |
13119 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13120 | } else { |
13121 | self_ = self; |
13122 | } |
13123 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
13124 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside a functionalize() call.");
13129 | } else { |
13130 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13131 | at::AutoDispatchSkipFunctionalize guard; |
13132 | at::Tensor tmp_output = at::_ops::less_equal__Scalar::call(self_, other); |
      return self;
13134 | } |
13135 | } else { |
13136 | at::Tensor tmp_output; |
13137 | { |
13138 | at::AutoDispatchSkipFunctionalize guard; |
13139 | tmp_output = at::_ops::less_equal_Scalar::call(self_, other); |
13140 | } |
13141 | at::functionalization::impl::replace_(self, tmp_output); |
13142 | at::functionalization::impl::commit_update(self); |
13143 | at::functionalization::impl::sync(self); |
13144 | return self; |
13145 | } |
13146 | } |
13147 | |
13148 | at::Tensor & less_equal_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
13149 | if (false) { |
13150 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13151 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
13153 | auto self_meta = to_meta(self); |
13154 | auto other_meta = to_meta(other); |
13155 | auto out_meta = to_meta(out); |
13156 | at::AutoDispatchSkipFunctionalize func_guard; |
13157 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13158 | at::_ops::less_equal_Tensor_out::call(self_meta, other_meta, out_meta); |
13159 | } |
13160 | |
13161 | at::Tensor self_; |
13162 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13163 | at::functionalization::impl::sync(self); |
13164 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13165 | } else { |
13166 | self_ = self; |
13167 | } |
13168 | |
13169 | at::Tensor other_; |
13170 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
13171 | at::functionalization::impl::sync(other); |
13172 | other_ = at::functionalization::impl::from_functional_tensor(other); |
13173 | } else { |
13174 | other_ = other; |
13175 | } |
13176 | |
13177 | at::Tensor out_; |
13178 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13179 | at::functionalization::impl::sync(out); |
13180 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13181 | } else { |
13182 | out_ = out; |
13183 | } |
13184 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13185 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside a functionalize() call.");
13190 | } else { |
13191 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13192 | at::AutoDispatchSkipFunctionalize guard; |
13193 | at::Tensor tmp_output = at::_ops::less_equal_Tensor_out::call(self_, other_, out_); |
      return out;
13195 | } |
13196 | } else { |
13197 | at::Tensor tmp_output; |
13198 | { |
13199 | at::AutoDispatchSkipFunctionalize guard; |
13200 | tmp_output = at::_ops::less_equal_Tensor::call(self_, other_); |
13201 | } |
13202 | at::functionalization::impl::replace_(out, tmp_output); |
13203 | at::functionalization::impl::commit_update(out); |
13204 | at::functionalization::impl::sync(out); |
13205 | return out; |
13206 | } |
13207 | } |
13208 | |
13209 | at::Tensor & less_equal__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { |
13210 | if (true) { |
13211 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13212 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
13214 | auto self_meta = to_meta(self); |
13215 | auto other_meta = to_meta(other); |
13216 | at::AutoDispatchSkipFunctionalize func_guard; |
13217 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13218 | at::_ops::less_equal__Tensor::call(self_meta, other_meta); |
13219 | } |
13220 | |
13221 | at::Tensor self_; |
13222 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13223 | at::functionalization::impl::sync(self); |
13224 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13225 | } else { |
13226 | self_ = self; |
13227 | } |
13228 | |
13229 | at::Tensor other_; |
13230 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
13231 | at::functionalization::impl::sync(other); |
13232 | other_ = at::functionalization::impl::from_functional_tensor(other); |
13233 | } else { |
13234 | other_ = other; |
13235 | } |
13236 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
13237 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside a functionalize() call.");
13242 | } else { |
13243 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13244 | at::AutoDispatchSkipFunctionalize guard; |
13245 | at::Tensor tmp_output = at::_ops::less_equal__Tensor::call(self_, other_); |
      return self;
13247 | } |
13248 | } else { |
13249 | at::Tensor tmp_output; |
13250 | { |
13251 | at::AutoDispatchSkipFunctionalize guard; |
13252 | tmp_output = at::_ops::less_equal_Tensor::call(self_, other_); |
13253 | } |
13254 | at::functionalization::impl::replace_(self, tmp_output); |
13255 | at::functionalization::impl::commit_update(self); |
13256 | at::functionalization::impl::sync(self); |
13257 | return self; |
13258 | } |
13259 | } |
13260 | |
13261 | at::Tensor & greater_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
13262 | if (false) { |
13263 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13264 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
13266 | auto self_meta = to_meta(self); |
13267 | auto out_meta = to_meta(out); |
13268 | at::AutoDispatchSkipFunctionalize func_guard; |
13269 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13270 | at::_ops::greater_Scalar_out::call(self_meta, other, out_meta); |
13271 | } |
13272 | |
13273 | at::Tensor self_; |
13274 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13275 | at::functionalization::impl::sync(self); |
13276 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13277 | } else { |
13278 | self_ = self; |
13279 | } |
13280 | |
13281 | at::Tensor out_; |
13282 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13283 | at::functionalization::impl::sync(out); |
13284 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13285 | } else { |
13286 | out_ = out; |
13287 | } |
13288 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13289 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside a functionalize() call.");
13294 | } else { |
13295 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13296 | at::AutoDispatchSkipFunctionalize guard; |
13297 | at::Tensor tmp_output = at::_ops::greater_Scalar_out::call(self_, other, out_); |
      return out;
13299 | } |
13300 | } else { |
13301 | at::Tensor tmp_output; |
13302 | { |
13303 | at::AutoDispatchSkipFunctionalize guard; |
13304 | tmp_output = at::_ops::greater_Scalar::call(self_, other); |
13305 | } |
13306 | at::functionalization::impl::replace_(out, tmp_output); |
13307 | at::functionalization::impl::commit_update(out); |
13308 | at::functionalization::impl::sync(out); |
13309 | return out; |
13310 | } |
13311 | } |
13312 | |
13313 | at::Tensor & greater__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { |
13314 | if (true) { |
13315 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13316 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
13318 | auto self_meta = to_meta(self); |
13319 | at::AutoDispatchSkipFunctionalize func_guard; |
13320 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13321 | at::_ops::greater__Scalar::call(self_meta, other); |
13322 | } |
13323 | |
13324 | at::Tensor self_; |
13325 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13326 | at::functionalization::impl::sync(self); |
13327 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13328 | } else { |
13329 | self_ = self; |
13330 | } |
13331 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
13332 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside a functionalize() call.");
13337 | } else { |
13338 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13339 | at::AutoDispatchSkipFunctionalize guard; |
13340 | at::Tensor tmp_output = at::_ops::greater__Scalar::call(self_, other); |
      return self;
13342 | } |
13343 | } else { |
13344 | at::Tensor tmp_output; |
13345 | { |
13346 | at::AutoDispatchSkipFunctionalize guard; |
13347 | tmp_output = at::_ops::greater_Scalar::call(self_, other); |
13348 | } |
13349 | at::functionalization::impl::replace_(self, tmp_output); |
13350 | at::functionalization::impl::commit_update(self); |
13351 | at::functionalization::impl::sync(self); |
13352 | return self; |
13353 | } |
13354 | } |
13355 | |
13356 | at::Tensor & greater_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
13357 | if (false) { |
13358 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13359 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today, because they all technically support meta tensors.)
13361 | auto self_meta = to_meta(self); |
13362 | auto other_meta = to_meta(other); |
13363 | auto out_meta = to_meta(out); |
13364 | at::AutoDispatchSkipFunctionalize func_guard; |
13365 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13366 | at::_ops::greater_Tensor_out::call(self_meta, other_meta, out_meta); |
13367 | } |
13368 | |
13369 | at::Tensor self_; |
13370 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13371 | at::functionalization::impl::sync(self); |
13372 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13373 | } else { |
13374 | self_ = self; |
13375 | } |
13376 | |
13377 | at::Tensor other_; |
13378 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
13379 | at::functionalization::impl::sync(other); |
13380 | other_ = at::functionalization::impl::from_functional_tensor(other); |
13381 | } else { |
13382 | other_ = other; |
13383 | } |
13384 | |
13385 | at::Tensor out_; |
13386 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13387 | at::functionalization::impl::sync(out); |
13388 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13389 | } else { |
13390 | out_ = out; |
13391 | } |
13392 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13393 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
13398 | } else { |
13399 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13400 | at::AutoDispatchSkipFunctionalize guard; |
13401 | at::Tensor tmp_output = at::_ops::greater_Tensor_out::call(self_, other_, out_); |
return out;
13403 | } |
13404 | } else { |
13405 | at::Tensor tmp_output; |
13406 | { |
13407 | at::AutoDispatchSkipFunctionalize guard; |
13408 | tmp_output = at::_ops::greater_Tensor::call(self_, other_); |
13409 | } |
13410 | at::functionalization::impl::replace_(out, tmp_output); |
13411 | at::functionalization::impl::commit_update(out); |
13412 | at::functionalization::impl::sync(out); |
13413 | return out; |
13414 | } |
13415 | } |
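
// [Illustrative note, hedged] For out= variants such as greater_out_Tensor_out
// above, functionalization never mutates out directly: it computes the purely
// functional greater(self, other) and then splices that result into out's
// wrapper via replace_/commit_update/sync. A hypothetical caller still sees
// the usual out= semantics:
//
//   at::Tensor self = at::randn({4});
//   at::Tensor other = at::randn({4});
//   at::Tensor out = at::functionalization::impl::to_functional_tensor(at::empty({0}));
//   at::greater_out(out, self, other);   // out now holds a freshly computed result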
13416 | |
13417 | at::Tensor & greater__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { |
13418 | if (true) { |
13419 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13420 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
13422 | auto self_meta = to_meta(self); |
13423 | auto other_meta = to_meta(other); |
13424 | at::AutoDispatchSkipFunctionalize func_guard; |
13425 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13426 | at::_ops::greater__Tensor::call(self_meta, other_meta); |
13427 | } |
13428 | |
13429 | at::Tensor self_; |
13430 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13431 | at::functionalization::impl::sync(self); |
13432 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13433 | } else { |
13434 | self_ = self; |
13435 | } |
13436 | |
13437 | at::Tensor other_; |
13438 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
13439 | at::functionalization::impl::sync(other); |
13440 | other_ = at::functionalization::impl::from_functional_tensor(other); |
13441 | } else { |
13442 | other_ = other; |
13443 | } |
13444 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
13445 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
13450 | } else { |
13451 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13452 | at::AutoDispatchSkipFunctionalize guard; |
13453 | at::Tensor tmp_output = at::_ops::greater__Tensor::call(self_, other_); |
return self;
13455 | } |
13456 | } else { |
13457 | at::Tensor tmp_output; |
13458 | { |
13459 | at::AutoDispatchSkipFunctionalize guard; |
13460 | tmp_output = at::_ops::greater_Tensor::call(self_, other_); |
13461 | } |
13462 | at::functionalization::impl::replace_(self, tmp_output); |
13463 | at::functionalization::impl::commit_update(self); |
13464 | at::functionalization::impl::sync(self); |
13465 | return self; |
13466 | } |
13467 | } |
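
// [Hedged example] The `if (true)` meta block above runs the original inplace
// op on meta tensors first, purely to reproduce shape errors that the
// functional rewrite would otherwise mask. For instance (hypothetical):
//
//   at::Tensor a = at::ones({2, 1});
//   at::Tensor b = at::ones({2, 3});
//   a.greater_(b);  // inplace broadcast cannot resize self -> shape error,
//                   // surfaced by the meta dispatch before any rewrite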
13468 | |
13469 | at::Tensor & lt_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
13470 | if (false) { |
13471 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13472 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
13474 | auto self_meta = to_meta(self); |
13475 | auto out_meta = to_meta(out); |
13476 | at::AutoDispatchSkipFunctionalize func_guard; |
13477 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13478 | at::_ops::lt_Scalar_out::call(self_meta, other, out_meta); |
13479 | } |
13480 | |
13481 | at::Tensor self_; |
13482 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13483 | at::functionalization::impl::sync(self); |
13484 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13485 | } else { |
13486 | self_ = self; |
13487 | } |
13488 | |
13489 | at::Tensor out_; |
13490 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13491 | at::functionalization::impl::sync(out); |
13492 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13493 | } else { |
13494 | out_ = out; |
13495 | } |
13496 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13497 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
13502 | } else { |
13503 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13504 | at::AutoDispatchSkipFunctionalize guard; |
13505 | at::Tensor tmp_output = at::_ops::lt_Scalar_out::call(self_, other, out_); |
return out;
13507 | } |
13508 | } else { |
13509 | at::Tensor tmp_output; |
13510 | { |
13511 | at::AutoDispatchSkipFunctionalize guard; |
13512 | tmp_output = at::_ops::lt_Scalar::call(self_, other); |
13513 | } |
13514 | at::functionalization::impl::replace_(out, tmp_output); |
13515 | at::functionalization::impl::commit_update(out); |
13516 | at::functionalization::impl::sync(out); |
13517 | return out; |
13518 | } |
13519 | } |
13520 | |
13521 | at::Tensor & lt__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { |
13522 | if (true) { |
13523 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13524 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
13526 | auto self_meta = to_meta(self); |
13527 | at::AutoDispatchSkipFunctionalize func_guard; |
13528 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13529 | at::_ops::lt__Scalar::call(self_meta, other); |
13530 | } |
13531 | |
13532 | at::Tensor self_; |
13533 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13534 | at::functionalization::impl::sync(self); |
13535 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13536 | } else { |
13537 | self_ = self; |
13538 | } |
13539 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
13540 | if ((false)) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
13545 | } else { |
13546 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13547 | at::AutoDispatchSkipFunctionalize guard; |
13548 | at::Tensor tmp_output = at::_ops::lt__Scalar::call(self_, other); |
return self;
13550 | } |
13551 | } else { |
13552 | at::Tensor tmp_output; |
13553 | { |
13554 | at::AutoDispatchSkipFunctionalize guard; |
13555 | tmp_output = at::_ops::lt_Scalar::call(self_, other); |
13556 | } |
13557 | at::functionalization::impl::replace_(self, tmp_output); |
13558 | at::functionalization::impl::commit_update(self); |
13559 | at::functionalization::impl::sync(self); |
13560 | return self; |
13561 | } |
13562 | } |
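
// [Hedged note] The `case 2` branches above rely on
// at::AutoDispatchSkipFunctionalize, an RAII guard that masks the
// Functionalize dispatch key for the current scope, so the redispatch falls
// through to the backend kernel instead of re-entering this file:
//
//   {
//     at::AutoDispatchSkipFunctionalize guard;    // Functionalize excluded here
//     at::_ops::lt__Scalar::call(self_, other);   // straight to CPU/CUDA/etc.
//   }                                             // prior TLS restored on destruction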
13563 | |
13564 | at::Tensor & lt_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
13565 | if (false) { |
13566 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13567 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
13569 | auto self_meta = to_meta(self); |
13570 | auto other_meta = to_meta(other); |
13571 | auto out_meta = to_meta(out); |
13572 | at::AutoDispatchSkipFunctionalize func_guard; |
13573 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13574 | at::_ops::lt_Tensor_out::call(self_meta, other_meta, out_meta); |
13575 | } |
13576 | |
13577 | at::Tensor self_; |
13578 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13579 | at::functionalization::impl::sync(self); |
13580 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13581 | } else { |
13582 | self_ = self; |
13583 | } |
13584 | |
13585 | at::Tensor other_; |
13586 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
13587 | at::functionalization::impl::sync(other); |
13588 | other_ = at::functionalization::impl::from_functional_tensor(other); |
13589 | } else { |
13590 | other_ = other; |
13591 | } |
13592 | |
13593 | at::Tensor out_; |
13594 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13595 | at::functionalization::impl::sync(out); |
13596 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13597 | } else { |
13598 | out_ = out; |
13599 | } |
13600 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13601 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
13606 | } else { |
13607 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13608 | at::AutoDispatchSkipFunctionalize guard; |
13609 | at::Tensor tmp_output = at::_ops::lt_Tensor_out::call(self_, other_, out_); |
return out;
13611 | } |
13612 | } else { |
13613 | at::Tensor tmp_output; |
13614 | { |
13615 | at::AutoDispatchSkipFunctionalize guard; |
13616 | tmp_output = at::_ops::lt_Tensor::call(self_, other_); |
13617 | } |
13618 | at::functionalization::impl::replace_(out, tmp_output); |
13619 | at::functionalization::impl::commit_update(out); |
13620 | at::functionalization::impl::sync(out); |
13621 | return out; |
13622 | } |
13623 | } |
13624 | |
13625 | at::Tensor & lt__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { |
13626 | if (true) { |
13627 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13628 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
13630 | auto self_meta = to_meta(self); |
13631 | auto other_meta = to_meta(other); |
13632 | at::AutoDispatchSkipFunctionalize func_guard; |
13633 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13634 | at::_ops::lt__Tensor::call(self_meta, other_meta); |
13635 | } |
13636 | |
13637 | at::Tensor self_; |
13638 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13639 | at::functionalization::impl::sync(self); |
13640 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13641 | } else { |
13642 | self_ = self; |
13643 | } |
13644 | |
13645 | at::Tensor other_; |
13646 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
13647 | at::functionalization::impl::sync(other); |
13648 | other_ = at::functionalization::impl::from_functional_tensor(other); |
13649 | } else { |
13650 | other_ = other; |
13651 | } |
13652 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
13653 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
13658 | } else { |
13659 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13660 | at::AutoDispatchSkipFunctionalize guard; |
13661 | at::Tensor tmp_output = at::_ops::lt__Tensor::call(self_, other_); |
return self;
13663 | } |
13664 | } else { |
13665 | at::Tensor tmp_output; |
13666 | { |
13667 | at::AutoDispatchSkipFunctionalize guard; |
13668 | tmp_output = at::_ops::lt_Tensor::call(self_, other_); |
13669 | } |
13670 | at::functionalization::impl::replace_(self, tmp_output); |
13671 | at::functionalization::impl::commit_update(self); |
13672 | at::functionalization::impl::sync(self); |
13673 | return self; |
13674 | } |
13675 | } |
13676 | |
13677 | at::Tensor & less_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
13678 | if (false) { |
13679 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13680 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
13682 | auto self_meta = to_meta(self); |
13683 | auto out_meta = to_meta(out); |
13684 | at::AutoDispatchSkipFunctionalize func_guard; |
13685 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13686 | at::_ops::less_Scalar_out::call(self_meta, other, out_meta); |
13687 | } |
13688 | |
13689 | at::Tensor self_; |
13690 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13691 | at::functionalization::impl::sync(self); |
13692 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13693 | } else { |
13694 | self_ = self; |
13695 | } |
13696 | |
13697 | at::Tensor out_; |
13698 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13699 | at::functionalization::impl::sync(out); |
13700 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13701 | } else { |
13702 | out_ = out; |
13703 | } |
13704 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13705 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
13710 | } else { |
13711 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13712 | at::AutoDispatchSkipFunctionalize guard; |
13713 | at::Tensor tmp_output = at::_ops::less_Scalar_out::call(self_, other, out_); |
return out;
13715 | } |
13716 | } else { |
13717 | at::Tensor tmp_output; |
13718 | { |
13719 | at::AutoDispatchSkipFunctionalize guard; |
13720 | tmp_output = at::_ops::less_Scalar::call(self_, other); |
13721 | } |
13722 | at::functionalization::impl::replace_(out, tmp_output); |
13723 | at::functionalization::impl::commit_update(out); |
13724 | at::functionalization::impl::sync(out); |
13725 | return out; |
13726 | } |
13727 | } |
13728 | |
13729 | at::Tensor & less__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { |
13730 | if (true) { |
13731 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13732 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
13734 | auto self_meta = to_meta(self); |
13735 | at::AutoDispatchSkipFunctionalize func_guard; |
13736 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13737 | at::_ops::less__Scalar::call(self_meta, other); |
13738 | } |
13739 | |
13740 | at::Tensor self_; |
13741 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13742 | at::functionalization::impl::sync(self); |
13743 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13744 | } else { |
13745 | self_ = self; |
13746 | } |
13747 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
13748 | if ((false)) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
13753 | } else { |
13754 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13755 | at::AutoDispatchSkipFunctionalize guard; |
13756 | at::Tensor tmp_output = at::_ops::less__Scalar::call(self_, other); |
return self;
13758 | } |
13759 | } else { |
13760 | at::Tensor tmp_output; |
13761 | { |
13762 | at::AutoDispatchSkipFunctionalize guard; |
13763 | tmp_output = at::_ops::less_Scalar::call(self_, other); |
13764 | } |
13765 | at::functionalization::impl::replace_(self, tmp_output); |
13766 | at::functionalization::impl::commit_update(self); |
13767 | at::functionalization::impl::sync(self); |
13768 | return self; |
13769 | } |
13770 | } |
13771 | |
13772 | at::Tensor & less_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
13773 | if (false) { |
13774 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13775 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
13777 | auto self_meta = to_meta(self); |
13778 | auto other_meta = to_meta(other); |
13779 | auto out_meta = to_meta(out); |
13780 | at::AutoDispatchSkipFunctionalize func_guard; |
13781 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13782 | at::_ops::less_Tensor_out::call(self_meta, other_meta, out_meta); |
13783 | } |
13784 | |
13785 | at::Tensor self_; |
13786 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13787 | at::functionalization::impl::sync(self); |
13788 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13789 | } else { |
13790 | self_ = self; |
13791 | } |
13792 | |
13793 | at::Tensor other_; |
13794 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
13795 | at::functionalization::impl::sync(other); |
13796 | other_ = at::functionalization::impl::from_functional_tensor(other); |
13797 | } else { |
13798 | other_ = other; |
13799 | } |
13800 | |
13801 | at::Tensor out_; |
13802 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13803 | at::functionalization::impl::sync(out); |
13804 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13805 | } else { |
13806 | out_ = out; |
13807 | } |
13808 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13809 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
13814 | } else { |
13815 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13816 | at::AutoDispatchSkipFunctionalize guard; |
13817 | at::Tensor tmp_output = at::_ops::less_Tensor_out::call(self_, other_, out_); |
return out;
13819 | } |
13820 | } else { |
13821 | at::Tensor tmp_output; |
13822 | { |
13823 | at::AutoDispatchSkipFunctionalize guard; |
13824 | tmp_output = at::_ops::less_Tensor::call(self_, other_); |
13825 | } |
13826 | at::functionalization::impl::replace_(out, tmp_output); |
13827 | at::functionalization::impl::commit_update(out); |
13828 | at::functionalization::impl::sync(out); |
13829 | return out; |
13830 | } |
13831 | } |
13832 | |
13833 | at::Tensor & less__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { |
13834 | if (true) { |
13835 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13836 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
13838 | auto self_meta = to_meta(self); |
13839 | auto other_meta = to_meta(other); |
13840 | at::AutoDispatchSkipFunctionalize func_guard; |
13841 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13842 | at::_ops::less__Tensor::call(self_meta, other_meta); |
13843 | } |
13844 | |
13845 | at::Tensor self_; |
13846 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13847 | at::functionalization::impl::sync(self); |
13848 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13849 | } else { |
13850 | self_ = self; |
13851 | } |
13852 | |
13853 | at::Tensor other_; |
13854 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
13855 | at::functionalization::impl::sync(other); |
13856 | other_ = at::functionalization::impl::from_functional_tensor(other); |
13857 | } else { |
13858 | other_ = other; |
13859 | } |
13860 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
13861 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
13866 | } else { |
13867 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13868 | at::AutoDispatchSkipFunctionalize guard; |
13869 | at::Tensor tmp_output = at::_ops::less__Tensor::call(self_, other_); |
return self;
13871 | } |
13872 | } else { |
13873 | at::Tensor tmp_output; |
13874 | { |
13875 | at::AutoDispatchSkipFunctionalize guard; |
13876 | tmp_output = at::_ops::less_Tensor::call(self_, other_); |
13877 | } |
13878 | at::functionalization::impl::replace_(self, tmp_output); |
13879 | at::functionalization::impl::commit_update(self); |
13880 | at::functionalization::impl::sync(self); |
13881 | return self; |
13882 | } |
13883 | } |
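
// [Hypothetical example] The `case 1` assert above fires when a functional
// tensor would leak into a mutation of a plain tensor, e.g.:
//
//   at::Tensor plain = at::empty({4});
//   at::Tensor wrapped =
//       at::functionalization::impl::to_functional_tensor(at::randn({4}));
//   at::less_out(plain, wrapped, wrapped);  // out is not functional -> hard error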
13884 | |
13885 | at::Tensor & take_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & index, at::Tensor & out) { |
13886 | if (false) { |
13887 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13888 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
13890 | auto self_meta = to_meta(self); |
13891 | auto index_meta = to_meta(index); |
13892 | auto out_meta = to_meta(out); |
13893 | at::AutoDispatchSkipFunctionalize func_guard; |
13894 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13895 | at::_ops::take_out::call(self_meta, index_meta, out_meta); |
13896 | } |
13897 | |
13898 | at::Tensor self_; |
13899 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13900 | at::functionalization::impl::sync(self); |
13901 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13902 | } else { |
13903 | self_ = self; |
13904 | } |
13905 | |
13906 | at::Tensor index_; |
13907 | if (at::functionalization::impl::isFunctionalTensor(index)) { |
13908 | at::functionalization::impl::sync(index); |
13909 | index_ = at::functionalization::impl::from_functional_tensor(index); |
13910 | } else { |
13911 | index_ = index; |
13912 | } |
13913 | |
13914 | at::Tensor out_; |
13915 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13916 | at::functionalization::impl::sync(out); |
13917 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13918 | } else { |
13919 | out_ = out; |
13920 | } |
13921 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13922 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
13927 | } else { |
13928 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13929 | at::AutoDispatchSkipFunctionalize guard; |
13930 | at::Tensor tmp_output = at::_ops::take_out::call(self_, index_, out_); |
return out;
13932 | } |
13933 | } else { |
13934 | at::Tensor tmp_output; |
13935 | { |
13936 | at::AutoDispatchSkipFunctionalize guard; |
13937 | tmp_output = at::_ops::take::call(self_, index_); |
13938 | } |
13939 | at::functionalization::impl::replace_(out, tmp_output); |
13940 | at::functionalization::impl::commit_update(out); |
13941 | at::functionalization::impl::sync(out); |
13942 | return out; |
13943 | } |
13944 | } |
13945 | |
13946 | at::Tensor & gather_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) { |
13947 | if (false) { |
13948 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13949 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
13951 | auto self_meta = to_meta(self); |
13952 | auto index_meta = to_meta(index); |
13953 | auto out_meta = to_meta(out); |
13954 | at::AutoDispatchSkipFunctionalize func_guard; |
13955 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13956 | at::_ops::gather_out::call(self_meta, dim, index_meta, sparse_grad, out_meta); |
13957 | } |
13958 | |
13959 | at::Tensor self_; |
13960 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13961 | at::functionalization::impl::sync(self); |
13962 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13963 | } else { |
13964 | self_ = self; |
13965 | } |
13966 | |
13967 | at::Tensor index_; |
13968 | if (at::functionalization::impl::isFunctionalTensor(index)) { |
13969 | at::functionalization::impl::sync(index); |
13970 | index_ = at::functionalization::impl::from_functional_tensor(index); |
13971 | } else { |
13972 | index_ = index; |
13973 | } |
13974 | |
13975 | at::Tensor out_; |
13976 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13977 | at::functionalization::impl::sync(out); |
13978 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13979 | } else { |
13980 | out_ = out; |
13981 | } |
13982 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13983 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
13988 | } else { |
13989 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13990 | at::AutoDispatchSkipFunctionalize guard; |
13991 | at::Tensor tmp_output = at::_ops::gather_out::call(self_, dim, index_, sparse_grad, out_); |
return out;
13993 | } |
13994 | } else { |
13995 | at::Tensor tmp_output; |
13996 | { |
13997 | at::AutoDispatchSkipFunctionalize guard; |
13998 | tmp_output = at::_ops::gather::call(self_, dim, index_, sparse_grad); |
13999 | } |
14000 | at::functionalization::impl::replace_(out, tmp_output); |
14001 | at::functionalization::impl::commit_update(out); |
14002 | at::functionalization::impl::sync(out); |
14003 | return out; |
14004 | } |
14005 | } |
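
// [Illustrative sketch, hedged] Every tensor argument above goes through the
// same prologue: if it is functional, sync() applies pending alias updates and
// from_functional_tensor() unwraps the value handed to the backend. The
// pattern is equivalent to a small helper (hypothetical, not in this file):
//
//   at::Tensor unwrap(const at::Tensor& t) {
//     if (!at::functionalization::impl::isFunctionalTensor(t)) {
//       return t;                                  // plain tensors pass through
//     }
//     at::functionalization::impl::sync(t);        // apply pending updates
//     return at::functionalization::impl::from_functional_tensor(t);
//   }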
14006 | |
14007 | at::Tensor & gather_out_dimname_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) { |
14008 | if (false) { |
14009 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14010 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
14012 | auto self_meta = to_meta(self); |
14013 | auto index_meta = to_meta(index); |
14014 | auto out_meta = to_meta(out); |
14015 | at::AutoDispatchSkipFunctionalize func_guard; |
14016 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14017 | at::_ops::gather_dimname_out::call(self_meta, dim, index_meta, sparse_grad, out_meta); |
14018 | } |
14019 | |
14020 | at::Tensor self_; |
14021 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14022 | at::functionalization::impl::sync(self); |
14023 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14024 | } else { |
14025 | self_ = self; |
14026 | } |
14027 | |
14028 | at::Tensor index_; |
14029 | if (at::functionalization::impl::isFunctionalTensor(index)) { |
14030 | at::functionalization::impl::sync(index); |
14031 | index_ = at::functionalization::impl::from_functional_tensor(index); |
14032 | } else { |
14033 | index_ = index; |
14034 | } |
14035 | |
14036 | at::Tensor out_; |
14037 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
14038 | at::functionalization::impl::sync(out); |
14039 | out_ = at::functionalization::impl::from_functional_tensor(out); |
14040 | } else { |
14041 | out_ = out; |
14042 | } |
14043 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
14044 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
14049 | } else { |
14050 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14051 | at::AutoDispatchSkipFunctionalize guard; |
14052 | at::Tensor tmp_output = at::_ops::gather_dimname_out::call(self_, dim, index_, sparse_grad, out_); |
return out;
14054 | } |
14055 | } else { |
14056 | at::Tensor tmp_output; |
14057 | { |
14058 | at::AutoDispatchSkipFunctionalize guard; |
14059 | tmp_output = at::_ops::gather_dimname::call(self_, dim, index_, sparse_grad); |
14060 | } |
14061 | at::functionalization::impl::replace_(out, tmp_output); |
14062 | at::functionalization::impl::commit_update(out); |
14063 | at::functionalization::impl::sync(out); |
14064 | return out; |
14065 | } |
14066 | } |
14067 | |
14068 | at::Tensor & cholesky_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, at::Tensor & out) { |
14069 | if (false) { |
14070 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14071 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
14073 | auto self_meta = to_meta(self); |
14074 | auto out_meta = to_meta(out); |
14075 | at::AutoDispatchSkipFunctionalize func_guard; |
14076 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14077 | at::_ops::cholesky_out::call(self_meta, upper, out_meta); |
14078 | } |
14079 | |
14080 | at::Tensor self_; |
14081 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14082 | at::functionalization::impl::sync(self); |
14083 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14084 | } else { |
14085 | self_ = self; |
14086 | } |
14087 | |
14088 | at::Tensor out_; |
14089 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
14090 | at::functionalization::impl::sync(out); |
14091 | out_ = at::functionalization::impl::from_functional_tensor(out); |
14092 | } else { |
14093 | out_ = out; |
14094 | } |
14095 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
14096 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
14101 | } else { |
14102 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14103 | at::AutoDispatchSkipFunctionalize guard; |
14104 | at::Tensor tmp_output = at::_ops::cholesky_out::call(self_, upper, out_); |
return out;
14106 | } |
14107 | } else { |
14108 | at::Tensor tmp_output; |
14109 | { |
14110 | at::AutoDispatchSkipFunctionalize guard; |
14111 | tmp_output = at::_ops::cholesky::call(self_, upper); |
14112 | } |
14113 | at::functionalization::impl::replace_(out, tmp_output); |
14114 | at::functionalization::impl::commit_update(out); |
14115 | at::functionalization::impl::sync(out); |
14116 | return out; |
14117 | } |
14118 | } |
14119 | |
14120 | at::Tensor & _cholesky_solve_helper_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & A, bool upper, at::Tensor & out) { |
14121 | if (false) { |
14122 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14123 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
14125 | auto self_meta = to_meta(self); |
14126 | auto A_meta = to_meta(A); |
14127 | auto out_meta = to_meta(out); |
14128 | at::AutoDispatchSkipFunctionalize func_guard; |
14129 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14130 | at::_ops::_cholesky_solve_helper_out::call(self_meta, A_meta, upper, out_meta); |
14131 | } |
14132 | |
14133 | at::Tensor self_; |
14134 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14135 | at::functionalization::impl::sync(self); |
14136 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14137 | } else { |
14138 | self_ = self; |
14139 | } |
14140 | |
14141 | at::Tensor A_; |
14142 | if (at::functionalization::impl::isFunctionalTensor(A)) { |
14143 | at::functionalization::impl::sync(A); |
14144 | A_ = at::functionalization::impl::from_functional_tensor(A); |
14145 | } else { |
14146 | A_ = A; |
14147 | } |
14148 | |
14149 | at::Tensor out_; |
14150 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
14151 | at::functionalization::impl::sync(out); |
14152 | out_ = at::functionalization::impl::from_functional_tensor(out); |
14153 | } else { |
14154 | out_ = out; |
14155 | } |
14156 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
14157 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(A))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
14162 | } else { |
14163 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14164 | at::AutoDispatchSkipFunctionalize guard; |
14165 | at::Tensor tmp_output = at::_ops::_cholesky_solve_helper_out::call(self_, A_, upper, out_); |
return out;
14167 | } |
14168 | } else { |
14169 | at::Tensor tmp_output; |
14170 | { |
14171 | at::AutoDispatchSkipFunctionalize guard; |
14172 | tmp_output = at::_ops::_cholesky_solve_helper::call(self_, A_, upper); |
14173 | } |
14174 | at::functionalization::impl::replace_(out, tmp_output); |
14175 | at::functionalization::impl::commit_update(out); |
14176 | at::functionalization::impl::sync(out); |
14177 | return out; |
14178 | } |
14179 | } |
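
// [Hedged note] Kernels in this file, including internal helpers like
// _cholesky_solve_helper_out_out above, are registered against the
// Functionalize dispatch key elsewhere in this generated file, roughly as:
//
//   TORCH_LIBRARY_IMPL(aten, Functionalize, m) {
//     m.impl("_cholesky_solve_helper.out",
//            TORCH_FN(functionalization::_cholesky_solve_helper_out_out));
//   }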
14180 | |
14181 | at::Tensor & polygamma_out_out(c10::DispatchKeySet dispatchKeySet, int64_t n, const at::Tensor & self, at::Tensor & out) { |
14182 | if (false) { |
14183 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14184 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
14186 | auto self_meta = to_meta(self); |
14187 | auto out_meta = to_meta(out); |
14188 | at::AutoDispatchSkipFunctionalize func_guard; |
14189 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14190 | at::_ops::polygamma_out::call(n, self_meta, out_meta); |
14191 | } |
14192 | |
14193 | at::Tensor self_; |
14194 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14195 | at::functionalization::impl::sync(self); |
14196 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14197 | } else { |
14198 | self_ = self; |
14199 | } |
14200 | |
14201 | at::Tensor out_; |
14202 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
14203 | at::functionalization::impl::sync(out); |
14204 | out_ = at::functionalization::impl::from_functional_tensor(out); |
14205 | } else { |
14206 | out_ = out; |
14207 | } |
14208 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
14209 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
14214 | } else { |
14215 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14216 | at::AutoDispatchSkipFunctionalize guard; |
14217 | at::Tensor tmp_output = at::_ops::polygamma_out::call(n, self_, out_); |
return out;
14219 | } |
14220 | } else { |
14221 | at::Tensor tmp_output; |
14222 | { |
14223 | at::AutoDispatchSkipFunctionalize guard; |
14224 | tmp_output = at::_ops::polygamma::call(n, self_); |
14225 | } |
14226 | at::functionalization::impl::replace_(out, tmp_output); |
14227 | at::functionalization::impl::commit_update(out); |
14228 | at::functionalization::impl::sync(out); |
14229 | return out; |
14230 | } |
14231 | } |
14232 | |
14233 | at::Tensor & igamma_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
14234 | if (false) { |
14235 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14236 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
14238 | auto self_meta = to_meta(self); |
14239 | auto other_meta = to_meta(other); |
14240 | auto out_meta = to_meta(out); |
14241 | at::AutoDispatchSkipFunctionalize func_guard; |
14242 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14243 | at::_ops::igamma_out::call(self_meta, other_meta, out_meta); |
14244 | } |
14245 | |
14246 | at::Tensor self_; |
14247 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14248 | at::functionalization::impl::sync(self); |
14249 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14250 | } else { |
14251 | self_ = self; |
14252 | } |
14253 | |
14254 | at::Tensor other_; |
14255 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
14256 | at::functionalization::impl::sync(other); |
14257 | other_ = at::functionalization::impl::from_functional_tensor(other); |
14258 | } else { |
14259 | other_ = other; |
14260 | } |
14261 | |
14262 | at::Tensor out_; |
14263 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
14264 | at::functionalization::impl::sync(out); |
14265 | out_ = at::functionalization::impl::from_functional_tensor(out); |
14266 | } else { |
14267 | out_ = out; |
14268 | } |
14269 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
14270 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
14275 | } else { |
14276 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14277 | at::AutoDispatchSkipFunctionalize guard; |
14278 | at::Tensor tmp_output = at::_ops::igamma_out::call(self_, other_, out_); |
return out;
14280 | } |
14281 | } else { |
14282 | at::Tensor tmp_output; |
14283 | { |
14284 | at::AutoDispatchSkipFunctionalize guard; |
14285 | tmp_output = at::_ops::igamma::call(self_, other_); |
14286 | } |
14287 | at::functionalization::impl::replace_(out, tmp_output); |
14288 | at::functionalization::impl::commit_update(out); |
14289 | at::functionalization::impl::sync(out); |
14290 | return out; |
14291 | } |
14292 | } |
14293 | |
14294 | at::Tensor & igamma_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { |
14295 | if (true) { |
14296 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14297 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
14299 | auto self_meta = to_meta(self); |
14300 | auto other_meta = to_meta(other); |
14301 | at::AutoDispatchSkipFunctionalize func_guard; |
14302 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14303 | at::_ops::igamma_::call(self_meta, other_meta); |
14304 | } |
14305 | |
14306 | at::Tensor self_; |
14307 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14308 | at::functionalization::impl::sync(self); |
14309 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14310 | } else { |
14311 | self_ = self; |
14312 | } |
14313 | |
14314 | at::Tensor other_; |
14315 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
14316 | at::functionalization::impl::sync(other); |
14317 | other_ = at::functionalization::impl::from_functional_tensor(other); |
14318 | } else { |
14319 | other_ = other; |
14320 | } |
14321 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
14322 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
14327 | } else { |
14328 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14329 | at::AutoDispatchSkipFunctionalize guard; |
14330 | at::Tensor tmp_output = at::_ops::igamma_::call(self_, other_); |
return self;
14332 | } |
14333 | } else { |
14334 | at::Tensor tmp_output; |
14335 | { |
14336 | at::AutoDispatchSkipFunctionalize guard; |
14337 | tmp_output = at::_ops::igamma::call(self_, other_); |
14338 | } |
14339 | at::functionalization::impl::replace_(self, tmp_output); |
14340 | at::functionalization::impl::commit_update(self); |
14341 | at::functionalization::impl::sync(self); |
14342 | return self; |
14343 | } |
14344 | } |
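
// [Hedged summary] The epilogue above is the standard write-back used for
// every mutation in this file: replace_() points self's wrapper at the new
// functional value, commit_update() publishes that value to the shared
// functional storage so aliases can pick it up, and sync() brings self itself
// up to date. A hypothetical alias observing the mutation:
//
//   at::Tensor other = at::rand({4});
//   at::Tensor base =
//       at::functionalization::impl::to_functional_tensor(at::rand({4}));
//   at::Tensor alias = base.view({2, 2});
//   base.igamma_(other);                        // handled by igamma_ above
//   at::functionalization::impl::sync(alias);   // alias now sees the update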
14345 | |
14346 | at::Tensor & fmin_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
14347 | if (false) { |
14348 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14349 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
14351 | auto self_meta = to_meta(self); |
14352 | auto other_meta = to_meta(other); |
14353 | auto out_meta = to_meta(out); |
14354 | at::AutoDispatchSkipFunctionalize func_guard; |
14355 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14356 | at::_ops::fmin_out::call(self_meta, other_meta, out_meta); |
14357 | } |
14358 | |
14359 | at::Tensor self_; |
14360 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14361 | at::functionalization::impl::sync(self); |
14362 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14363 | } else { |
14364 | self_ = self; |
14365 | } |
14366 | |
14367 | at::Tensor other_; |
14368 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
14369 | at::functionalization::impl::sync(other); |
14370 | other_ = at::functionalization::impl::from_functional_tensor(other); |
14371 | } else { |
14372 | other_ = other; |
14373 | } |
14374 | |
14375 | at::Tensor out_; |
14376 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
14377 | at::functionalization::impl::sync(out); |
14378 | out_ = at::functionalization::impl::from_functional_tensor(out); |
14379 | } else { |
14380 | out_ = out; |
14381 | } |
14382 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
14383 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
14388 | } else { |
14389 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14390 | at::AutoDispatchSkipFunctionalize guard; |
14391 | at::Tensor tmp_output = at::_ops::fmin_out::call(self_, other_, out_); |
return out;
14393 | } |
14394 | } else { |
14395 | at::Tensor tmp_output; |
14396 | { |
14397 | at::AutoDispatchSkipFunctionalize guard; |
14398 | tmp_output = at::_ops::fmin::call(self_, other_); |
14399 | } |
14400 | at::functionalization::impl::replace_(out, tmp_output); |
14401 | at::functionalization::impl::commit_update(out); |
14402 | at::functionalization::impl::sync(out); |
14403 | return out; |
14404 | } |
14405 | } |
14406 | |
14407 | at::Tensor & max_out_unary_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
14408 | if (false) { |
14409 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14410 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
14412 | auto self_meta = to_meta(self); |
14413 | auto out_meta = to_meta(out); |
14414 | at::AutoDispatchSkipFunctionalize func_guard; |
14415 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14416 | at::_ops::max_unary_out::call(self_meta, out_meta); |
14417 | } |
14418 | |
14419 | at::Tensor self_; |
14420 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14421 | at::functionalization::impl::sync(self); |
14422 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14423 | } else { |
14424 | self_ = self; |
14425 | } |
14426 | |
14427 | at::Tensor out_; |
14428 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
14429 | at::functionalization::impl::sync(out); |
14430 | out_ = at::functionalization::impl::from_functional_tensor(out); |
14431 | } else { |
14432 | out_ = out; |
14433 | } |
14434 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
14435 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
14440 | } else { |
14441 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14442 | at::AutoDispatchSkipFunctionalize guard; |
14443 | at::Tensor tmp_output = at::_ops::max_unary_out::call(self_, out_); |
return out;
14445 | } |
14446 | } else { |
14447 | at::Tensor tmp_output; |
14448 | { |
14449 | at::AutoDispatchSkipFunctionalize guard; |
14450 | tmp_output = at::_ops::max::call(self_); |
14451 | } |
14452 | at::functionalization::impl::replace_(out, tmp_output); |
14453 | at::functionalization::impl::commit_update(out); |
14454 | at::functionalization::impl::sync(out); |
14455 | return out; |
14456 | } |
14457 | } |
14458 | |
14459 | at::Tensor & fmax_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
14460 | if (false) { |
14461 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14462 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
14464 | auto self_meta = to_meta(self); |
14465 | auto other_meta = to_meta(other); |
14466 | auto out_meta = to_meta(out); |
14467 | at::AutoDispatchSkipFunctionalize func_guard; |
14468 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14469 | at::_ops::fmax_out::call(self_meta, other_meta, out_meta); |
14470 | } |
14471 | |
14472 | at::Tensor self_; |
14473 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14474 | at::functionalization::impl::sync(self); |
14475 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14476 | } else { |
14477 | self_ = self; |
14478 | } |
14479 | |
14480 | at::Tensor other_; |
14481 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
14482 | at::functionalization::impl::sync(other); |
14483 | other_ = at::functionalization::impl::from_functional_tensor(other); |
14484 | } else { |
14485 | other_ = other; |
14486 | } |
14487 | |
14488 | at::Tensor out_; |
14489 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
14490 | at::functionalization::impl::sync(out); |
14491 | out_ = at::functionalization::impl::from_functional_tensor(out); |
14492 | } else { |
14493 | out_ = out; |
14494 | } |
14495 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
14496 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
14501 | } else { |
14502 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14503 | at::AutoDispatchSkipFunctionalize guard; |
14504 | at::Tensor tmp_output = at::_ops::fmax_out::call(self_, other_, out_); |
return out;
14506 | } |
14507 | } else { |
14508 | at::Tensor tmp_output; |
14509 | { |
14510 | at::AutoDispatchSkipFunctionalize guard; |
14511 | tmp_output = at::_ops::fmax::call(self_, other_); |
14512 | } |
14513 | at::functionalization::impl::replace_(out, tmp_output); |
14514 | at::functionalization::impl::commit_update(out); |
14515 | at::functionalization::impl::sync(out); |
14516 | return out; |
14517 | } |
14518 | } |
14519 | |
14520 | at::Tensor & maximum_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
14521 | if (false) { |
14522 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14523 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
14525 | auto self_meta = to_meta(self); |
14526 | auto other_meta = to_meta(other); |
14527 | auto out_meta = to_meta(out); |
14528 | at::AutoDispatchSkipFunctionalize func_guard; |
14529 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14530 | at::_ops::maximum_out::call(self_meta, other_meta, out_meta); |
14531 | } |
14532 | |
14533 | at::Tensor self_; |
14534 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14535 | at::functionalization::impl::sync(self); |
14536 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14537 | } else { |
14538 | self_ = self; |
14539 | } |
14540 | |
14541 | at::Tensor other_; |
14542 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
14543 | at::functionalization::impl::sync(other); |
14544 | other_ = at::functionalization::impl::from_functional_tensor(other); |
14545 | } else { |
14546 | other_ = other; |
14547 | } |
14548 | |
14549 | at::Tensor out_; |
14550 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
14551 | at::functionalization::impl::sync(out); |
14552 | out_ = at::functionalization::impl::from_functional_tensor(out); |
14553 | } else { |
14554 | out_ = out; |
14555 | } |
14556 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
14557 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
14558 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
14559 | TORCH_INTERNAL_ASSERT(false, |
14560 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14561 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14562 | } else { |
14563 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14564 | at::AutoDispatchSkipFunctionalize guard; |
14565 | at::Tensor tmp_output = at::_ops::maximum_out::call(self_, other_, out_); |
14566 | return out;; |
14567 | } |
14568 | } else { |
14569 | at::Tensor tmp_output; |
14570 | { |
14571 | at::AutoDispatchSkipFunctionalize guard; |
14572 | tmp_output = at::_ops::maximum::call(self_, other_); |
14573 | } |
14574 | at::functionalization::impl::replace_(out, tmp_output); |
14575 | at::functionalization::impl::commit_update(out); |
14576 | at::functionalization::impl::sync(out); |
14577 | return out; |
14578 | } |
14579 | } |
14580 | |
14581 | at::Tensor & max_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
14582 | if (false) { |
14583 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14584 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
14585 | // (We can only do this for inplace ops today though, because they technicaly all support meta tensors). |
14586 | auto self_meta = to_meta(self); |
14587 | auto other_meta = to_meta(other); |
14588 | auto out_meta = to_meta(out); |
14589 | at::AutoDispatchSkipFunctionalize func_guard; |
14590 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14591 | at::_ops::max_out::call(self_meta, other_meta, out_meta); |
14592 | } |
14593 | |
14594 | at::Tensor self_; |
14595 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14596 | at::functionalization::impl::sync(self); |
14597 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14598 | } else { |
14599 | self_ = self; |
14600 | } |
14601 | |
14602 | at::Tensor other_; |
14603 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
14604 | at::functionalization::impl::sync(other); |
14605 | other_ = at::functionalization::impl::from_functional_tensor(other); |
14606 | } else { |
14607 | other_ = other; |
14608 | } |
14609 | |
14610 | at::Tensor out_; |
14611 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
14612 | at::functionalization::impl::sync(out); |
14613 | out_ = at::functionalization::impl::from_functional_tensor(out); |
14614 | } else { |
14615 | out_ = out; |
14616 | } |
14617 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
14618 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
14619 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
14620 | TORCH_INTERNAL_ASSERT(false, |
14621 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14622 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14623 | } else { |
14624 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14625 | at::AutoDispatchSkipFunctionalize guard; |
14626 | at::Tensor tmp_output = at::_ops::max_out::call(self_, other_, out_); |
14627 | return out;; |
14628 | } |
14629 | } else { |
14630 | at::Tensor tmp_output; |
14631 | { |
14632 | at::AutoDispatchSkipFunctionalize guard; |
14633 | tmp_output = at::_ops::max_other::call(self_, other_); |
14634 | } |
14635 | at::functionalization::impl::replace_(out, tmp_output); |
14636 | at::functionalization::impl::commit_update(out); |
14637 | at::functionalization::impl::sync(out); |
14638 | return out; |
14639 | } |
14640 | } |
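
// Note: the functional counterpart of an out= overload is looked up by schema
// name, and the mapping is not always a plain suffix swap: max.out(self,
// other, out) dispatches to max.other(self, other) above (at::_ops::max_other),
// because the functional binary max is registered under the "other" overload
// name in native_functions.yaml.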

void _amp_foreach_non_finite_check_and_unscale_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale, at::TensorList out) {
    if (false) {
      // Before converting the mutable op to its functional variant, run meta tensors through the original op.
      // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
      auto self_meta = to_meta(self);
      auto found_inf_meta = to_meta(found_inf);
      auto inv_scale_meta = to_meta(inv_scale);
      auto out_meta = to_meta(out);
      at::AutoDispatchSkipFunctionalize func_guard;
      c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
      at::_ops::_amp_foreach_non_finite_check_and_unscale_out::call(self_meta, found_inf_meta, inv_scale_meta, out_meta);
    }

    ::std::vector<at::Tensor> self_;
    if (at::functionalization::impl::isFunctionalTensor(self)) {
      at::functionalization::impl::sync(self);
      self_ = at::functionalization::impl::from_functional_tensor(self);
    } else {
      self_ = self.vec();
    }

    at::Tensor found_inf_;
    if (at::functionalization::impl::isFunctionalTensor(found_inf)) {
      at::functionalization::impl::sync(found_inf);
      found_inf_ = at::functionalization::impl::from_functional_tensor(found_inf);
    } else {
      found_inf_ = found_inf;
    }

    at::Tensor inv_scale_;
    if (at::functionalization::impl::isFunctionalTensor(inv_scale)) {
      at::functionalization::impl::sync(inv_scale);
      inv_scale_ = at::functionalization::impl::from_functional_tensor(inv_scale);
    } else {
      inv_scale_ = inv_scale;
    }

    ::std::vector<at::Tensor> out_;
    if (at::functionalization::impl::isFunctionalTensor(out)) {
      at::functionalization::impl::sync(out);
      out_ = at::functionalization::impl::from_functional_tensor(out);
    } else {
      out_ = out.vec();
    }
    if (!(true && at::functionalization::impl::isFunctionalTensor(found_inf) && at::functionalization::impl::isFunctionalTensor(out))) {
      if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(inv_scale))) {
        // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
        TORCH_INTERNAL_ASSERT(false,
          "mutating a non-functional tensor with a functional tensor is not allowed.",
          " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
      } else {
        // case 2: arguments are not functional tensors, so we no-op and redispatch.
        at::AutoDispatchSkipFunctionalize guard;
        at::_ops::_amp_foreach_non_finite_check_and_unscale_out::call(self_, found_inf_, inv_scale_, out_);
      }
    } else {
      ::std::tuple<::std::vector<at::Tensor>,at::Tensor> tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        tmp_output = at::_ops::_amp_foreach_non_finite_check_and_unscale::call(self_, found_inf_, inv_scale_);
      }
      at::functionalization::impl::replace_(found_inf, std::get<1>(tmp_output));
      at::functionalization::impl::commit_update(found_inf);
      at::functionalization::impl::sync(found_inf);
      at::functionalization::impl::replace_(out, std::get<0>(tmp_output));
      at::functionalization::impl::commit_update(out);
      at::functionalization::impl::sync(out);
    }
}

void _amp_foreach_non_finite_check_and_unscale_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale) {
    if (true) {
      // Before converting the mutable op to its functional variant, run meta tensors through the original op.
      // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
      auto self_meta = to_meta(self);
      auto found_inf_meta = to_meta(found_inf);
      auto inv_scale_meta = to_meta(inv_scale);
      at::AutoDispatchSkipFunctionalize func_guard;
      c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
      at::_ops::_amp_foreach_non_finite_check_and_unscale_::call(self_meta, found_inf_meta, inv_scale_meta);
    }

    ::std::vector<at::Tensor> self_;
    if (at::functionalization::impl::isFunctionalTensor(self)) {
      at::functionalization::impl::sync(self);
      self_ = at::functionalization::impl::from_functional_tensor(self);
    } else {
      self_ = self.vec();
    }

    at::Tensor found_inf_;
    if (at::functionalization::impl::isFunctionalTensor(found_inf)) {
      at::functionalization::impl::sync(found_inf);
      found_inf_ = at::functionalization::impl::from_functional_tensor(found_inf);
    } else {
      found_inf_ = found_inf;
    }

    at::Tensor inv_scale_;
    if (at::functionalization::impl::isFunctionalTensor(inv_scale)) {
      at::functionalization::impl::sync(inv_scale);
      inv_scale_ = at::functionalization::impl::from_functional_tensor(inv_scale);
    } else {
      inv_scale_ = inv_scale;
    }
    if (!(true && at::functionalization::impl::isFunctionalTensor(self) && at::functionalization::impl::isFunctionalTensor(found_inf))) {
      if ((false || at::functionalization::impl::isFunctionalTensor(inv_scale))) {
        // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
        TORCH_INTERNAL_ASSERT(false,
          "mutating a non-functional tensor with a functional tensor is not allowed.",
          " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
      } else {
        // case 2: arguments are not functional tensors, so we no-op and redispatch.
        at::AutoDispatchSkipFunctionalize guard;
        at::_ops::_amp_foreach_non_finite_check_and_unscale_::call(self_, found_inf_, inv_scale_);
      }
    } else {
      ::std::tuple<::std::vector<at::Tensor>,at::Tensor> tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        tmp_output = at::_ops::_amp_foreach_non_finite_check_and_unscale::call(self_, found_inf_, inv_scale_);
      }
      at::functionalization::impl::replace_(self, std::get<0>(tmp_output));
      at::functionalization::impl::commit_update(self);
      at::functionalization::impl::sync(self);
      at::functionalization::impl::replace_(found_inf, std::get<1>(tmp_output));
      at::functionalization::impl::commit_update(found_inf);
      at::functionalization::impl::sync(found_inf);
    }
}
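
// Note [multi-output write-back]
// When the functional variant returns multiple values, the generated kernels
// unpack the tuple positionally: _amp_foreach_non_finite_check_and_unscale
// returns (Tensor[] self_out, Tensor found_inf_out), so std::get<0>() is
// committed back into the TensorList argument (out above, self in the
// inplace variant) and std::get<1>() into found_inf. A hypothetical sketch of
// the same unpacking (`ts`, `inf`, `scale` are stand-in arguments, not names
// from this file):
//
//   auto r = at::_ops::_amp_foreach_non_finite_check_and_unscale::call(ts, inf, scale);
//   at::functionalization::impl::replace_(inf, std::get<1>(r));  // Tensor slot
//   at::functionalization::impl::replace_(ts, std::get<0>(r));   // Tensor[] slot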

void _foreach_sub_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
    if (false) {
      // Before converting the mutable op to its functional variant, run meta tensors through the original op.
      // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
      auto self_meta = to_meta(self);
      auto out_meta = to_meta(out);
      at::AutoDispatchSkipFunctionalize func_guard;
      c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
      at::_ops::_foreach_sub_Scalar_out::call(self_meta, scalar, out_meta);
    }

    ::std::vector<at::Tensor> self_;
    if (at::functionalization::impl::isFunctionalTensor(self)) {
      at::functionalization::impl::sync(self);
      self_ = at::functionalization::impl::from_functional_tensor(self);
    } else {
      self_ = self.vec();
    }

    ::std::vector<at::Tensor> out_;
    if (at::functionalization::impl::isFunctionalTensor(out)) {
      at::functionalization::impl::sync(out);
      out_ = at::functionalization::impl::from_functional_tensor(out);
    } else {
      out_ = out.vec();
    }
    if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
      if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
        // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
        TORCH_INTERNAL_ASSERT(false,
          "mutating a non-functional tensor with a functional tensor is not allowed.",
          " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
      } else {
        // case 2: arguments are not functional tensors, so we no-op and redispatch.
        at::AutoDispatchSkipFunctionalize guard;
        at::_ops::_foreach_sub_Scalar_out::call(self_, scalar, out_);
      }
    } else {
      ::std::vector<at::Tensor> tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        tmp_output = at::_ops::_foreach_sub_Scalar::call(self_, scalar);
      }
      at::functionalization::impl::replace_(out, tmp_output);
      at::functionalization::impl::commit_update(out);
      at::functionalization::impl::sync(out);
    }
}

void _foreach_sub__Scalar(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
    if (true) {
      // Before converting the mutable op to its functional variant, run meta tensors through the original op.
      // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
      auto self_meta = to_meta(self);
      at::AutoDispatchSkipFunctionalize func_guard;
      c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
      at::_ops::_foreach_sub__Scalar::call(self_meta, scalar);
    }

    ::std::vector<at::Tensor> self_;
    if (at::functionalization::impl::isFunctionalTensor(self)) {
      at::functionalization::impl::sync(self);
      self_ = at::functionalization::impl::from_functional_tensor(self);
    } else {
      self_ = self.vec();
    }
    if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
      if ((false)) {
        // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
        TORCH_INTERNAL_ASSERT(false,
          "mutating a non-functional tensor with a functional tensor is not allowed.",
          " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
      } else {
        // case 2: arguments are not functional tensors, so we no-op and redispatch.
        at::AutoDispatchSkipFunctionalize guard;
        at::_ops::_foreach_sub__Scalar::call(self_, scalar);
      }
    } else {
      ::std::vector<at::Tensor> tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        tmp_output = at::_ops::_foreach_sub_Scalar::call(self_, scalar);
      }
      at::functionalization::impl::replace_(self, tmp_output);
      at::functionalization::impl::commit_update(self);
      at::functionalization::impl::sync(self);
    }
}
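
// Note [inplace foreach variants]
// The trailing-underscore kernels (e.g. _foreach_sub__Scalar above) differ
// from their out= siblings in two ways: the meta-tensor pre-flight runs
// (`if (true)`) because inplace ops are expected to support meta tensors, and
// `self` is both the input and the mutated output, so the functional result
// is committed straight back into `self`. The write-back tail is the same
// three-call contract everywhere; a rough sketch, where `t_f` and `new_value`
// are hypothetical:
//
//   at::functionalization::impl::replace_(t_f, new_value); // swap in the new value
//   at::functionalization::impl::commit_update(t_f);       // record the mutation
//   at::functionalization::impl::sync(t_f);                // propagate through aliases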

void _foreach_maximum_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
    if (false) {
      // Before converting the mutable op to its functional variant, run meta tensors through the original op.
      // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
      auto self_meta = to_meta(self);
      auto out_meta = to_meta(out);
      at::AutoDispatchSkipFunctionalize func_guard;
      c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
      at::_ops::_foreach_maximum_Scalar_out::call(self_meta, scalar, out_meta);
    }

    ::std::vector<at::Tensor> self_;
    if (at::functionalization::impl::isFunctionalTensor(self)) {
      at::functionalization::impl::sync(self);
      self_ = at::functionalization::impl::from_functional_tensor(self);
    } else {
      self_ = self.vec();
    }

    ::std::vector<at::Tensor> out_;
    if (at::functionalization::impl::isFunctionalTensor(out)) {
      at::functionalization::impl::sync(out);
      out_ = at::functionalization::impl::from_functional_tensor(out);
    } else {
      out_ = out.vec();
    }
    if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
      if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
        // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
        TORCH_INTERNAL_ASSERT(false,
          "mutating a non-functional tensor with a functional tensor is not allowed.",
          " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
      } else {
        // case 2: arguments are not functional tensors, so we no-op and redispatch.
        at::AutoDispatchSkipFunctionalize guard;
        at::_ops::_foreach_maximum_Scalar_out::call(self_, scalar, out_);
      }
    } else {
      ::std::vector<at::Tensor> tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        tmp_output = at::_ops::_foreach_maximum_Scalar::call(self_, scalar);
      }
      at::functionalization::impl::replace_(out, tmp_output);
      at::functionalization::impl::commit_update(out);
      at::functionalization::impl::sync(out);
    }
}

void _foreach_maximum__Scalar(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
    if (true) {
      // Before converting the mutable op to its functional variant, run meta tensors through the original op.
      // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
      auto self_meta = to_meta(self);
      at::AutoDispatchSkipFunctionalize func_guard;
      c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
      at::_ops::_foreach_maximum__Scalar::call(self_meta, scalar);
    }

    ::std::vector<at::Tensor> self_;
    if (at::functionalization::impl::isFunctionalTensor(self)) {
      at::functionalization::impl::sync(self);
      self_ = at::functionalization::impl::from_functional_tensor(self);
    } else {
      self_ = self.vec();
    }
    if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
      if ((false)) {
        // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
        TORCH_INTERNAL_ASSERT(false,
          "mutating a non-functional tensor with a functional tensor is not allowed.",
          " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
      } else {
        // case 2: arguments are not functional tensors, so we no-op and redispatch.
        at::AutoDispatchSkipFunctionalize guard;
        at::_ops::_foreach_maximum__Scalar::call(self_, scalar);
      }
    } else {
      ::std::vector<at::Tensor> tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        tmp_output = at::_ops::_foreach_maximum_Scalar::call(self_, scalar);
      }
      at::functionalization::impl::replace_(self, tmp_output);
      at::functionalization::impl::commit_update(self);
      at::functionalization::impl::sync(self);
    }
}

void _foreach_sub_out_List_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {
    if (false) {
      // Before converting the mutable op to its functional variant, run meta tensors through the original op.
      // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
      auto self_meta = to_meta(self);
      auto other_meta = to_meta(other);
      auto out_meta = to_meta(out);
      at::AutoDispatchSkipFunctionalize func_guard;
      c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
      at::_ops::_foreach_sub_List_out::call(self_meta, other_meta, alpha, out_meta);
    }

    ::std::vector<at::Tensor> self_;
    if (at::functionalization::impl::isFunctionalTensor(self)) {
      at::functionalization::impl::sync(self);
      self_ = at::functionalization::impl::from_functional_tensor(self);
    } else {
      self_ = self.vec();
    }

    ::std::vector<at::Tensor> other_;
    if (at::functionalization::impl::isFunctionalTensor(other)) {
      at::functionalization::impl::sync(other);
      other_ = at::functionalization::impl::from_functional_tensor(other);
    } else {
      other_ = other.vec();
    }

    ::std::vector<at::Tensor> out_;
    if (at::functionalization::impl::isFunctionalTensor(out)) {
      at::functionalization::impl::sync(out);
      out_ = at::functionalization::impl::from_functional_tensor(out);
    } else {
      out_ = out.vec();
    }
    if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
      if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
        // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
        TORCH_INTERNAL_ASSERT(false,
          "mutating a non-functional tensor with a functional tensor is not allowed.",
          " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
      } else {
        // case 2: arguments are not functional tensors, so we no-op and redispatch.
        at::AutoDispatchSkipFunctionalize guard;
        at::_ops::_foreach_sub_List_out::call(self_, other_, alpha, out_);
      }
    } else {
      ::std::vector<at::Tensor> tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        tmp_output = at::_ops::_foreach_sub_List::call(self_, other_, alpha);
      }
      at::functionalization::impl::replace_(out, tmp_output);
      at::functionalization::impl::commit_update(out);
      at::functionalization::impl::sync(out);
    }
}

void _foreach_sub__List(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
    if (true) {
      // Before converting the mutable op to its functional variant, run meta tensors through the original op.
      // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
      auto self_meta = to_meta(self);
      auto other_meta = to_meta(other);
      at::AutoDispatchSkipFunctionalize func_guard;
      c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
      at::_ops::_foreach_sub__List::call(self_meta, other_meta, alpha);
    }

    ::std::vector<at::Tensor> self_;
    if (at::functionalization::impl::isFunctionalTensor(self)) {
      at::functionalization::impl::sync(self);
      self_ = at::functionalization::impl::from_functional_tensor(self);
    } else {
      self_ = self.vec();
    }

    ::std::vector<at::Tensor> other_;
    if (at::functionalization::impl::isFunctionalTensor(other)) {
      at::functionalization::impl::sync(other);
      other_ = at::functionalization::impl::from_functional_tensor(other);
    } else {
      other_ = other.vec();
    }
    if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
      if ((false || at::functionalization::impl::isFunctionalTensor(other))) {
        // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
        TORCH_INTERNAL_ASSERT(false,
          "mutating a non-functional tensor with a functional tensor is not allowed.",
          " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
      } else {
        // case 2: arguments are not functional tensors, so we no-op and redispatch.
        at::AutoDispatchSkipFunctionalize guard;
        at::_ops::_foreach_sub__List::call(self_, other_, alpha);
      }
    } else {
      ::std::vector<at::Tensor> tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        tmp_output = at::_ops::_foreach_sub_List::call(self_, other_, alpha);
      }
      at::functionalization::impl::replace_(self, tmp_output);
      at::functionalization::impl::commit_update(self);
      at::functionalization::impl::sync(self);
    }
}

void _foreach_maximum_out_List_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) {
    if (false) {
      // Before converting the mutable op to its functional variant, run meta tensors through the original op.
      // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
      auto self_meta = to_meta(self);
      auto other_meta = to_meta(other);
      auto out_meta = to_meta(out);
      at::AutoDispatchSkipFunctionalize func_guard;
      c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
      at::_ops::_foreach_maximum_List_out::call(self_meta, other_meta, out_meta);
    }

    ::std::vector<at::Tensor> self_;
    if (at::functionalization::impl::isFunctionalTensor(self)) {
      at::functionalization::impl::sync(self);
      self_ = at::functionalization::impl::from_functional_tensor(self);
    } else {
      self_ = self.vec();
    }

    ::std::vector<at::Tensor> other_;
    if (at::functionalization::impl::isFunctionalTensor(other)) {
      at::functionalization::impl::sync(other);
      other_ = at::functionalization::impl::from_functional_tensor(other);
    } else {
      other_ = other.vec();
    }

    ::std::vector<at::Tensor> out_;
    if (at::functionalization::impl::isFunctionalTensor(out)) {
      at::functionalization::impl::sync(out);
      out_ = at::functionalization::impl::from_functional_tensor(out);
    } else {
      out_ = out.vec();
    }
    if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
      if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
        // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
        TORCH_INTERNAL_ASSERT(false,
          "mutating a non-functional tensor with a functional tensor is not allowed.",
          " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
      } else {
        // case 2: arguments are not functional tensors, so we no-op and redispatch.
        at::AutoDispatchSkipFunctionalize guard;
        at::_ops::_foreach_maximum_List_out::call(self_, other_, out_);
      }
    } else {
      ::std::vector<at::Tensor> tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        tmp_output = at::_ops::_foreach_maximum_List::call(self_, other_);
      }
      at::functionalization::impl::replace_(out, tmp_output);
      at::functionalization::impl::commit_update(out);
      at::functionalization::impl::sync(out);
    }
}

void _foreach_maximum__List(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
    if (true) {
      // Before converting the mutable op to its functional variant, run meta tensors through the original op.
      // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
      auto self_meta = to_meta(self);
      auto other_meta = to_meta(other);
      at::AutoDispatchSkipFunctionalize func_guard;
      c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
      at::_ops::_foreach_maximum__List::call(self_meta, other_meta);
    }

    ::std::vector<at::Tensor> self_;
    if (at::functionalization::impl::isFunctionalTensor(self)) {
      at::functionalization::impl::sync(self);
      self_ = at::functionalization::impl::from_functional_tensor(self);
    } else {
      self_ = self.vec();
    }

    ::std::vector<at::Tensor> other_;
    if (at::functionalization::impl::isFunctionalTensor(other)) {
      at::functionalization::impl::sync(other);
      other_ = at::functionalization::impl::from_functional_tensor(other);
    } else {
      other_ = other.vec();
    }
    if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
      if ((false || at::functionalization::impl::isFunctionalTensor(other))) {
        // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
        TORCH_INTERNAL_ASSERT(false,
          "mutating a non-functional tensor with a functional tensor is not allowed.",
          " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
      } else {
        // case 2: arguments are not functional tensors, so we no-op and redispatch.
        at::AutoDispatchSkipFunctionalize guard;
        at::_ops::_foreach_maximum__List::call(self_, other_);
      }
    } else {
      ::std::vector<at::Tensor> tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        tmp_output = at::_ops::_foreach_maximum_List::call(self_, other_);
      }
      at::functionalization::impl::replace_(self, tmp_output);
      at::functionalization::impl::commit_update(self);
      at::functionalization::impl::sync(self);
    }
}

void _foreach_sub_out_ScalarList_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
    if (false) {
      // Before converting the mutable op to its functional variant, run meta tensors through the original op.
      // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
      auto self_meta = to_meta(self);
      auto out_meta = to_meta(out);
      at::AutoDispatchSkipFunctionalize func_guard;
      c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
      at::_ops::_foreach_sub_ScalarList_out::call(self_meta, scalars, out_meta);
    }

    ::std::vector<at::Tensor> self_;
    if (at::functionalization::impl::isFunctionalTensor(self)) {
      at::functionalization::impl::sync(self);
      self_ = at::functionalization::impl::from_functional_tensor(self);
    } else {
      self_ = self.vec();
    }

    ::std::vector<at::Tensor> out_;
    if (at::functionalization::impl::isFunctionalTensor(out)) {
      at::functionalization::impl::sync(out);
      out_ = at::functionalization::impl::from_functional_tensor(out);
    } else {
      out_ = out.vec();
    }
    if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
      if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
        // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
        TORCH_INTERNAL_ASSERT(false,
          "mutating a non-functional tensor with a functional tensor is not allowed.",
          " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
      } else {
        // case 2: arguments are not functional tensors, so we no-op and redispatch.
        at::AutoDispatchSkipFunctionalize guard;
        at::_ops::_foreach_sub_ScalarList_out::call(self_, scalars, out_);
      }
    } else {
      ::std::vector<at::Tensor> tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        tmp_output = at::_ops::_foreach_sub_ScalarList::call(self_, scalars);
      }
      at::functionalization::impl::replace_(out, tmp_output);
      at::functionalization::impl::commit_update(out);
      at::functionalization::impl::sync(out);
    }
}

void _foreach_sub__ScalarList(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    if (true) {
      // Before converting the mutable op to its functional variant, run meta tensors through the original op.
      // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
      auto self_meta = to_meta(self);
      at::AutoDispatchSkipFunctionalize func_guard;
      c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
      at::_ops::_foreach_sub__ScalarList::call(self_meta, scalars);
    }

    ::std::vector<at::Tensor> self_;
    if (at::functionalization::impl::isFunctionalTensor(self)) {
      at::functionalization::impl::sync(self);
      self_ = at::functionalization::impl::from_functional_tensor(self);
    } else {
      self_ = self.vec();
    }
    if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
      if ((false)) {
        // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
        TORCH_INTERNAL_ASSERT(false,
          "mutating a non-functional tensor with a functional tensor is not allowed.",
          " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
      } else {
        // case 2: arguments are not functional tensors, so we no-op and redispatch.
        at::AutoDispatchSkipFunctionalize guard;
        at::_ops::_foreach_sub__ScalarList::call(self_, scalars);
      }
    } else {
      ::std::vector<at::Tensor> tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        tmp_output = at::_ops::_foreach_sub_ScalarList::call(self_, scalars);
      }
      at::functionalization::impl::replace_(self, tmp_output);
      at::functionalization::impl::commit_update(self);
      at::functionalization::impl::sync(self);
    }
}

void _foreach_maximum_out_ScalarList_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
    if (false) {
      // Before converting the mutable op to its functional variant, run meta tensors through the original op.
      // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
      auto self_meta = to_meta(self);
      auto out_meta = to_meta(out);
      at::AutoDispatchSkipFunctionalize func_guard;
      c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
      at::_ops::_foreach_maximum_ScalarList_out::call(self_meta, scalars, out_meta);
    }

    ::std::vector<at::Tensor> self_;
    if (at::functionalization::impl::isFunctionalTensor(self)) {
      at::functionalization::impl::sync(self);
      self_ = at::functionalization::impl::from_functional_tensor(self);
    } else {
      self_ = self.vec();
    }

    ::std::vector<at::Tensor> out_;
    if (at::functionalization::impl::isFunctionalTensor(out)) {
      at::functionalization::impl::sync(out);
      out_ = at::functionalization::impl::from_functional_tensor(out);
    } else {
      out_ = out.vec();
    }
    if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
      if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
        // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
        TORCH_INTERNAL_ASSERT(false,
          "mutating a non-functional tensor with a functional tensor is not allowed.",
          " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
      } else {
        // case 2: arguments are not functional tensors, so we no-op and redispatch.
        at::AutoDispatchSkipFunctionalize guard;
        at::_ops::_foreach_maximum_ScalarList_out::call(self_, scalars, out_);
      }
    } else {
      ::std::vector<at::Tensor> tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        tmp_output = at::_ops::_foreach_maximum_ScalarList::call(self_, scalars);
      }
      at::functionalization::impl::replace_(out, tmp_output);
      at::functionalization::impl::commit_update(out);
      at::functionalization::impl::sync(out);
    }
}

void _foreach_maximum__ScalarList(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    if (true) {
      // Before converting the mutable op to its functional variant, run meta tensors through the original op.
      // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
      auto self_meta = to_meta(self);
      at::AutoDispatchSkipFunctionalize func_guard;
      c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
      at::_ops::_foreach_maximum__ScalarList::call(self_meta, scalars);
    }

    ::std::vector<at::Tensor> self_;
    if (at::functionalization::impl::isFunctionalTensor(self)) {
      at::functionalization::impl::sync(self);
      self_ = at::functionalization::impl::from_functional_tensor(self);
    } else {
      self_ = self.vec();
    }
    if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
      if ((false)) {
        // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
        TORCH_INTERNAL_ASSERT(false,
          "mutating a non-functional tensor with a functional tensor is not allowed.",
          " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
      } else {
        // case 2: arguments are not functional tensors, so we no-op and redispatch.
        at::AutoDispatchSkipFunctionalize guard;
        at::_ops::_foreach_maximum__ScalarList::call(self_, scalars);
      }
    } else {
      ::std::vector<at::Tensor> tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        tmp_output = at::_ops::_foreach_maximum_ScalarList::call(self_, scalars);
      }
      at::functionalization::impl::replace_(self, tmp_output);
      at::functionalization::impl::commit_update(self);
      at::functionalization::impl::sync(self);
    }
}
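
// Note [foreach overload families]
// Each binary foreach op above comes in up to three argument flavors, each
// with the same out=/inplace kernel pair:
//   - Scalar:     one scalar applied to every tensor in self,
//   - List:       elementwise against a second tensor list (plus alpha for sub),
//   - ScalarList: scalars[i] applied to self[i], one scalar per tensor.
// The unary foreach ops that follow (_foreach_exp, _foreach_sqrt,
// _foreach_abs, _foreach_acos, ...) take only the tensor list, so they have a
// single flavor.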

void _foreach_exp_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    if (false) {
      // Before converting the mutable op to its functional variant, run meta tensors through the original op.
      // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
      auto self_meta = to_meta(self);
      auto out_meta = to_meta(out);
      at::AutoDispatchSkipFunctionalize func_guard;
      c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
      at::_ops::_foreach_exp_out::call(self_meta, out_meta);
    }

    ::std::vector<at::Tensor> self_;
    if (at::functionalization::impl::isFunctionalTensor(self)) {
      at::functionalization::impl::sync(self);
      self_ = at::functionalization::impl::from_functional_tensor(self);
    } else {
      self_ = self.vec();
    }

    ::std::vector<at::Tensor> out_;
    if (at::functionalization::impl::isFunctionalTensor(out)) {
      at::functionalization::impl::sync(out);
      out_ = at::functionalization::impl::from_functional_tensor(out);
    } else {
      out_ = out.vec();
    }
    if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
      if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
        // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
        TORCH_INTERNAL_ASSERT(false,
          "mutating a non-functional tensor with a functional tensor is not allowed.",
          " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
      } else {
        // case 2: arguments are not functional tensors, so we no-op and redispatch.
        at::AutoDispatchSkipFunctionalize guard;
        at::_ops::_foreach_exp_out::call(self_, out_);
      }
    } else {
      ::std::vector<at::Tensor> tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        tmp_output = at::_ops::_foreach_exp::call(self_);
      }
      at::functionalization::impl::replace_(out, tmp_output);
      at::functionalization::impl::commit_update(out);
      at::functionalization::impl::sync(out);
    }
}

void _foreach_exp_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    if (true) {
      // Before converting the mutable op to its functional variant, run meta tensors through the original op.
      // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
      auto self_meta = to_meta(self);
      at::AutoDispatchSkipFunctionalize func_guard;
      c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
      at::_ops::_foreach_exp_::call(self_meta);
    }

    ::std::vector<at::Tensor> self_;
    if (at::functionalization::impl::isFunctionalTensor(self)) {
      at::functionalization::impl::sync(self);
      self_ = at::functionalization::impl::from_functional_tensor(self);
    } else {
      self_ = self.vec();
    }
    if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
      if ((false)) {
        // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
        TORCH_INTERNAL_ASSERT(false,
          "mutating a non-functional tensor with a functional tensor is not allowed.",
          " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
      } else {
        // case 2: arguments are not functional tensors, so we no-op and redispatch.
        at::AutoDispatchSkipFunctionalize guard;
        at::_ops::_foreach_exp_::call(self_);
      }
    } else {
      ::std::vector<at::Tensor> tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        tmp_output = at::_ops::_foreach_exp::call(self_);
      }
      at::functionalization::impl::replace_(self, tmp_output);
      at::functionalization::impl::commit_update(self);
      at::functionalization::impl::sync(self);
    }
}

void _foreach_sqrt_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    if (false) {
      // Before converting the mutable op to its functional variant, run meta tensors through the original op.
      // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
      auto self_meta = to_meta(self);
      auto out_meta = to_meta(out);
      at::AutoDispatchSkipFunctionalize func_guard;
      c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
      at::_ops::_foreach_sqrt_out::call(self_meta, out_meta);
    }

    ::std::vector<at::Tensor> self_;
    if (at::functionalization::impl::isFunctionalTensor(self)) {
      at::functionalization::impl::sync(self);
      self_ = at::functionalization::impl::from_functional_tensor(self);
    } else {
      self_ = self.vec();
    }

    ::std::vector<at::Tensor> out_;
    if (at::functionalization::impl::isFunctionalTensor(out)) {
      at::functionalization::impl::sync(out);
      out_ = at::functionalization::impl::from_functional_tensor(out);
    } else {
      out_ = out.vec();
    }
    if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
      if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
        // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
        TORCH_INTERNAL_ASSERT(false,
          "mutating a non-functional tensor with a functional tensor is not allowed.",
          " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
      } else {
        // case 2: arguments are not functional tensors, so we no-op and redispatch.
        at::AutoDispatchSkipFunctionalize guard;
        at::_ops::_foreach_sqrt_out::call(self_, out_);
      }
    } else {
      ::std::vector<at::Tensor> tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        tmp_output = at::_ops::_foreach_sqrt::call(self_);
      }
      at::functionalization::impl::replace_(out, tmp_output);
      at::functionalization::impl::commit_update(out);
      at::functionalization::impl::sync(out);
    }
}

void _foreach_sqrt_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    if (true) {
      // Before converting the mutable op to its functional variant, run meta tensors through the original op.
      // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
      auto self_meta = to_meta(self);
      at::AutoDispatchSkipFunctionalize func_guard;
      c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
      at::_ops::_foreach_sqrt_::call(self_meta);
    }

    ::std::vector<at::Tensor> self_;
    if (at::functionalization::impl::isFunctionalTensor(self)) {
      at::functionalization::impl::sync(self);
      self_ = at::functionalization::impl::from_functional_tensor(self);
    } else {
      self_ = self.vec();
    }
    if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
      if ((false)) {
        // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
        TORCH_INTERNAL_ASSERT(false,
          "mutating a non-functional tensor with a functional tensor is not allowed.",
          " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
      } else {
        // case 2: arguments are not functional tensors, so we no-op and redispatch.
        at::AutoDispatchSkipFunctionalize guard;
        at::_ops::_foreach_sqrt_::call(self_);
      }
    } else {
      ::std::vector<at::Tensor> tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        tmp_output = at::_ops::_foreach_sqrt::call(self_);
      }
      at::functionalization::impl::replace_(self, tmp_output);
      at::functionalization::impl::commit_update(self);
      at::functionalization::impl::sync(self);
    }
}
15574 | |
15575 | void _foreach_abs_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { |
15576 | if (false) { |
15577 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15578 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
15579 | // (We can only do this for inplace ops today though, because they technicaly all support meta tensors). |
15580 | auto self_meta = to_meta(self); |
15581 | auto out_meta = to_meta(out); |
15582 | at::AutoDispatchSkipFunctionalize func_guard; |
15583 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15584 | at::_ops::_foreach_abs_out::call(self_meta, out_meta); |
15585 | } |
15586 | |
15587 | ::std::vector<at::Tensor> self_; |
15588 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15589 | at::functionalization::impl::sync(self); |
15590 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15591 | } else { |
15592 | self_ = self.vec(); |
15593 | } |
15594 | |
15595 | ::std::vector<at::Tensor> out_; |
15596 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
15597 | at::functionalization::impl::sync(out); |
15598 | out_ = at::functionalization::impl::from_functional_tensor(out); |
15599 | } else { |
15600 | out_ = out.vec(); |
15601 | } |
15602 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
15603 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
15604 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15605 | TORCH_INTERNAL_ASSERT(false, |
15606 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15607 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15608 | } else { |
15609 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15610 | at::AutoDispatchSkipFunctionalize guard; |
15611 | at::_ops::_foreach_abs_out::call(self_, out_); |
15612 | ; |
15613 | } |
15614 | } else { |
15615 | ::std::vector<at::Tensor> tmp_output; |
15616 | { |
15617 | at::AutoDispatchSkipFunctionalize guard; |
15618 | tmp_output = at::_ops::_foreach_abs::call(self_); |
15619 | } |
15620 | at::functionalization::impl::replace_(out, tmp_output); |
15621 | at::functionalization::impl::commit_update(out); |
15622 | at::functionalization::impl::sync(out); |
15623 | |
15624 | } |
15625 | } |
15626 | |
15627 | void _foreach_abs_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { |
15628 | if (true) { |
15629 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15630 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
15631 | // (We can only do this for inplace ops today though, because they technicaly all support meta tensors). |
15632 | auto self_meta = to_meta(self); |
15633 | at::AutoDispatchSkipFunctionalize func_guard; |
15634 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15635 | at::_ops::_foreach_abs_::call(self_meta); |
15636 | } |
15637 | |
15638 | ::std::vector<at::Tensor> self_; |
15639 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15640 | at::functionalization::impl::sync(self); |
15641 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15642 | } else { |
15643 | self_ = self.vec(); |
15644 | } |
15645 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
15646 | if ((false)) { |
15647 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15648 | TORCH_INTERNAL_ASSERT(false, |
15649 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15650 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15651 | } else { |
15652 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15653 | at::AutoDispatchSkipFunctionalize guard; |
15654 | at::_ops::_foreach_abs_::call(self_); |
15655 | ; |
15656 | } |
15657 | } else { |
15658 | ::std::vector<at::Tensor> tmp_output; |
15659 | { |
15660 | at::AutoDispatchSkipFunctionalize guard; |
15661 | tmp_output = at::_ops::_foreach_abs::call(self_); |
15662 | } |
15663 | at::functionalization::impl::replace_(self, tmp_output); |
15664 | at::functionalization::impl::commit_update(self); |
15665 | at::functionalization::impl::sync(self); |
15666 | |
15667 | } |
15668 | } |
15669 | |
15670 | void _foreach_acos_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { |
15671 | if (false) { |
15672 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15673 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
15675 | auto self_meta = to_meta(self); |
15676 | auto out_meta = to_meta(out); |
15677 | at::AutoDispatchSkipFunctionalize func_guard; |
15678 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15679 | at::_ops::_foreach_acos_out::call(self_meta, out_meta); |
15680 | } |
15681 | |
15682 | ::std::vector<at::Tensor> self_; |
15683 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15684 | at::functionalization::impl::sync(self); |
15685 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15686 | } else { |
15687 | self_ = self.vec(); |
15688 | } |
15689 | |
15690 | ::std::vector<at::Tensor> out_; |
15691 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
15692 | at::functionalization::impl::sync(out); |
15693 | out_ = at::functionalization::impl::from_functional_tensor(out); |
15694 | } else { |
15695 | out_ = out.vec(); |
15696 | } |
15697 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
15698 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
15703 | } else { |
15704 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15705 | at::AutoDispatchSkipFunctionalize guard; |
15706 | at::_ops::_foreach_acos_out::call(self_, out_); |
15707 | ; |
15708 | } |
15709 | } else { |
15710 | ::std::vector<at::Tensor> tmp_output; |
15711 | { |
15712 | at::AutoDispatchSkipFunctionalize guard; |
15713 | tmp_output = at::_ops::_foreach_acos::call(self_); |
15714 | } |
15715 | at::functionalization::impl::replace_(out, tmp_output); |
15716 | at::functionalization::impl::commit_update(out); |
15717 | at::functionalization::impl::sync(out); |
15718 | |
15719 | } |
15720 | } |
15721 | |
15722 | void _foreach_acos_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { |
15723 | if (true) { |
15724 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15725 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
15727 | auto self_meta = to_meta(self); |
15728 | at::AutoDispatchSkipFunctionalize func_guard; |
15729 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15730 | at::_ops::_foreach_acos_::call(self_meta); |
15731 | } |
15732 | |
15733 | ::std::vector<at::Tensor> self_; |
15734 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15735 | at::functionalization::impl::sync(self); |
15736 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15737 | } else { |
15738 | self_ = self.vec(); |
15739 | } |
15740 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
15741 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
15746 | } else { |
15747 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15748 | at::AutoDispatchSkipFunctionalize guard; |
15749 | at::_ops::_foreach_acos_::call(self_); |
15750 | ; |
15751 | } |
15752 | } else { |
15753 | ::std::vector<at::Tensor> tmp_output; |
15754 | { |
15755 | at::AutoDispatchSkipFunctionalize guard; |
15756 | tmp_output = at::_ops::_foreach_acos::call(self_); |
15757 | } |
15758 | at::functionalization::impl::replace_(self, tmp_output); |
15759 | at::functionalization::impl::commit_update(self); |
15760 | at::functionalization::impl::sync(self); |
15761 | |
15762 | } |
15763 | } |
15764 | |
15765 | void _foreach_cos_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { |
15766 | if (false) { |
15767 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15768 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
15770 | auto self_meta = to_meta(self); |
15771 | auto out_meta = to_meta(out); |
15772 | at::AutoDispatchSkipFunctionalize func_guard; |
15773 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15774 | at::_ops::_foreach_cos_out::call(self_meta, out_meta); |
15775 | } |
15776 | |
15777 | ::std::vector<at::Tensor> self_; |
15778 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15779 | at::functionalization::impl::sync(self); |
15780 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15781 | } else { |
15782 | self_ = self.vec(); |
15783 | } |
15784 | |
15785 | ::std::vector<at::Tensor> out_; |
15786 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
15787 | at::functionalization::impl::sync(out); |
15788 | out_ = at::functionalization::impl::from_functional_tensor(out); |
15789 | } else { |
15790 | out_ = out.vec(); |
15791 | } |
15792 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
15793 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
15798 | } else { |
15799 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15800 | at::AutoDispatchSkipFunctionalize guard; |
15801 | at::_ops::_foreach_cos_out::call(self_, out_); |
15802 | ; |
15803 | } |
15804 | } else { |
15805 | ::std::vector<at::Tensor> tmp_output; |
15806 | { |
15807 | at::AutoDispatchSkipFunctionalize guard; |
15808 | tmp_output = at::_ops::_foreach_cos::call(self_); |
15809 | } |
15810 | at::functionalization::impl::replace_(out, tmp_output); |
15811 | at::functionalization::impl::commit_update(out); |
15812 | at::functionalization::impl::sync(out); |
15813 | |
15814 | } |
15815 | } |
15816 | |
15817 | void _foreach_cos_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { |
15818 | if (true) { |
15819 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15820 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
15822 | auto self_meta = to_meta(self); |
15823 | at::AutoDispatchSkipFunctionalize func_guard; |
15824 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15825 | at::_ops::_foreach_cos_::call(self_meta); |
15826 | } |
15827 | |
15828 | ::std::vector<at::Tensor> self_; |
15829 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15830 | at::functionalization::impl::sync(self); |
15831 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15832 | } else { |
15833 | self_ = self.vec(); |
15834 | } |
15835 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
15836 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
15841 | } else { |
15842 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15843 | at::AutoDispatchSkipFunctionalize guard; |
15844 | at::_ops::_foreach_cos_::call(self_); |
15845 | ; |
15846 | } |
15847 | } else { |
15848 | ::std::vector<at::Tensor> tmp_output; |
15849 | { |
15850 | at::AutoDispatchSkipFunctionalize guard; |
15851 | tmp_output = at::_ops::_foreach_cos::call(self_); |
15852 | } |
15853 | at::functionalization::impl::replace_(self, tmp_output); |
15854 | at::functionalization::impl::commit_update(self); |
15855 | at::functionalization::impl::sync(self); |
15856 | |
15857 | } |
15858 | } |
15859 | |
15860 | void _foreach_floor_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { |
15861 | if (false) { |
15862 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15863 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
15865 | auto self_meta = to_meta(self); |
15866 | auto out_meta = to_meta(out); |
15867 | at::AutoDispatchSkipFunctionalize func_guard; |
15868 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15869 | at::_ops::_foreach_floor_out::call(self_meta, out_meta); |
15870 | } |
15871 | |
15872 | ::std::vector<at::Tensor> self_; |
15873 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15874 | at::functionalization::impl::sync(self); |
15875 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15876 | } else { |
15877 | self_ = self.vec(); |
15878 | } |
15879 | |
15880 | ::std::vector<at::Tensor> out_; |
15881 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
15882 | at::functionalization::impl::sync(out); |
15883 | out_ = at::functionalization::impl::from_functional_tensor(out); |
15884 | } else { |
15885 | out_ = out.vec(); |
15886 | } |
15887 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
15888 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
15893 | } else { |
15894 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15895 | at::AutoDispatchSkipFunctionalize guard; |
15896 | at::_ops::_foreach_floor_out::call(self_, out_); |
15897 | ; |
15898 | } |
15899 | } else { |
15900 | ::std::vector<at::Tensor> tmp_output; |
15901 | { |
15902 | at::AutoDispatchSkipFunctionalize guard; |
15903 | tmp_output = at::_ops::_foreach_floor::call(self_); |
15904 | } |
15905 | at::functionalization::impl::replace_(out, tmp_output); |
15906 | at::functionalization::impl::commit_update(out); |
15907 | at::functionalization::impl::sync(out); |
15908 | |
15909 | } |
15910 | } |
15911 | |
15912 | void _foreach_floor_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { |
15913 | if (true) { |
15914 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15915 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
15917 | auto self_meta = to_meta(self); |
15918 | at::AutoDispatchSkipFunctionalize func_guard; |
15919 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15920 | at::_ops::_foreach_floor_::call(self_meta); |
15921 | } |
15922 | |
15923 | ::std::vector<at::Tensor> self_; |
15924 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15925 | at::functionalization::impl::sync(self); |
15926 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15927 | } else { |
15928 | self_ = self.vec(); |
15929 | } |
15930 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
15931 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
15936 | } else { |
15937 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15938 | at::AutoDispatchSkipFunctionalize guard; |
15939 | at::_ops::_foreach_floor_::call(self_); |
15940 | ; |
15941 | } |
15942 | } else { |
15943 | ::std::vector<at::Tensor> tmp_output; |
15944 | { |
15945 | at::AutoDispatchSkipFunctionalize guard; |
15946 | tmp_output = at::_ops::_foreach_floor::call(self_); |
15947 | } |
15948 | at::functionalization::impl::replace_(self, tmp_output); |
15949 | at::functionalization::impl::commit_update(self); |
15950 | at::functionalization::impl::sync(self); |
15951 | |
15952 | } |
15953 | } |
15954 | |
15955 | void _foreach_log10_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { |
15956 | if (false) { |
15957 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15958 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
15960 | auto self_meta = to_meta(self); |
15961 | auto out_meta = to_meta(out); |
15962 | at::AutoDispatchSkipFunctionalize func_guard; |
15963 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15964 | at::_ops::_foreach_log10_out::call(self_meta, out_meta); |
15965 | } |
15966 | |
15967 | ::std::vector<at::Tensor> self_; |
15968 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15969 | at::functionalization::impl::sync(self); |
15970 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15971 | } else { |
15972 | self_ = self.vec(); |
15973 | } |
15974 | |
15975 | ::std::vector<at::Tensor> out_; |
15976 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
15977 | at::functionalization::impl::sync(out); |
15978 | out_ = at::functionalization::impl::from_functional_tensor(out); |
15979 | } else { |
15980 | out_ = out.vec(); |
15981 | } |
15982 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
15983 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
15988 | } else { |
15989 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15990 | at::AutoDispatchSkipFunctionalize guard; |
15991 | at::_ops::_foreach_log10_out::call(self_, out_); |
15992 | ; |
15993 | } |
15994 | } else { |
15995 | ::std::vector<at::Tensor> tmp_output; |
15996 | { |
15997 | at::AutoDispatchSkipFunctionalize guard; |
15998 | tmp_output = at::_ops::_foreach_log10::call(self_); |
15999 | } |
16000 | at::functionalization::impl::replace_(out, tmp_output); |
16001 | at::functionalization::impl::commit_update(out); |
16002 | at::functionalization::impl::sync(out); |
16003 | |
16004 | } |
16005 | } |
16006 | |
16007 | void _foreach_log10_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { |
16008 | if (true) { |
16009 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16010 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16012 | auto self_meta = to_meta(self); |
16013 | at::AutoDispatchSkipFunctionalize func_guard; |
16014 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16015 | at::_ops::_foreach_log10_::call(self_meta); |
16016 | } |
16017 | |
16018 | ::std::vector<at::Tensor> self_; |
16019 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16020 | at::functionalization::impl::sync(self); |
16021 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16022 | } else { |
16023 | self_ = self.vec(); |
16024 | } |
16025 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
16026 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
16031 | } else { |
16032 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16033 | at::AutoDispatchSkipFunctionalize guard; |
16034 | at::_ops::_foreach_log10_::call(self_); |
16035 | ; |
16036 | } |
16037 | } else { |
16038 | ::std::vector<at::Tensor> tmp_output; |
16039 | { |
16040 | at::AutoDispatchSkipFunctionalize guard; |
16041 | tmp_output = at::_ops::_foreach_log10::call(self_); |
16042 | } |
16043 | at::functionalization::impl::replace_(self, tmp_output); |
16044 | at::functionalization::impl::commit_update(self); |
16045 | at::functionalization::impl::sync(self); |
16046 | |
16047 | } |
16048 | } |
16049 | |
16050 | void _foreach_neg_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { |
16051 | if (false) { |
16052 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16053 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16055 | auto self_meta = to_meta(self); |
16056 | auto out_meta = to_meta(out); |
16057 | at::AutoDispatchSkipFunctionalize func_guard; |
16058 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16059 | at::_ops::_foreach_neg_out::call(self_meta, out_meta); |
16060 | } |
16061 | |
16062 | ::std::vector<at::Tensor> self_; |
16063 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16064 | at::functionalization::impl::sync(self); |
16065 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16066 | } else { |
16067 | self_ = self.vec(); |
16068 | } |
16069 | |
16070 | ::std::vector<at::Tensor> out_; |
16071 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16072 | at::functionalization::impl::sync(out); |
16073 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16074 | } else { |
16075 | out_ = out.vec(); |
16076 | } |
16077 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16078 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
16083 | } else { |
16084 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16085 | at::AutoDispatchSkipFunctionalize guard; |
16086 | at::_ops::_foreach_neg_out::call(self_, out_); |
16087 | ; |
16088 | } |
16089 | } else { |
16090 | ::std::vector<at::Tensor> tmp_output; |
16091 | { |
16092 | at::AutoDispatchSkipFunctionalize guard; |
16093 | tmp_output = at::_ops::_foreach_neg::call(self_); |
16094 | } |
16095 | at::functionalization::impl::replace_(out, tmp_output); |
16096 | at::functionalization::impl::commit_update(out); |
16097 | at::functionalization::impl::sync(out); |
16098 | |
16099 | } |
16100 | } |
16101 | |
16102 | void _foreach_neg_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { |
16103 | if (true) { |
16104 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16105 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16107 | auto self_meta = to_meta(self); |
16108 | at::AutoDispatchSkipFunctionalize func_guard; |
16109 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16110 | at::_ops::_foreach_neg_::call(self_meta); |
16111 | } |
16112 | |
16113 | ::std::vector<at::Tensor> self_; |
16114 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16115 | at::functionalization::impl::sync(self); |
16116 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16117 | } else { |
16118 | self_ = self.vec(); |
16119 | } |
16120 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
16121 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
16126 | } else { |
16127 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16128 | at::AutoDispatchSkipFunctionalize guard; |
16129 | at::_ops::_foreach_neg_::call(self_); |
16130 | ; |
16131 | } |
16132 | } else { |
16133 | ::std::vector<at::Tensor> tmp_output; |
16134 | { |
16135 | at::AutoDispatchSkipFunctionalize guard; |
16136 | tmp_output = at::_ops::_foreach_neg::call(self_); |
16137 | } |
16138 | at::functionalization::impl::replace_(self, tmp_output); |
16139 | at::functionalization::impl::commit_update(self); |
16140 | at::functionalization::impl::sync(self); |
16141 | |
16142 | } |
16143 | } |
16144 | |
16145 | void _foreach_tan_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { |
16146 | if (false) { |
16147 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16148 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16150 | auto self_meta = to_meta(self); |
16151 | auto out_meta = to_meta(out); |
16152 | at::AutoDispatchSkipFunctionalize func_guard; |
16153 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16154 | at::_ops::_foreach_tan_out::call(self_meta, out_meta); |
16155 | } |
16156 | |
16157 | ::std::vector<at::Tensor> self_; |
16158 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16159 | at::functionalization::impl::sync(self); |
16160 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16161 | } else { |
16162 | self_ = self.vec(); |
16163 | } |
16164 | |
16165 | ::std::vector<at::Tensor> out_; |
16166 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16167 | at::functionalization::impl::sync(out); |
16168 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16169 | } else { |
16170 | out_ = out.vec(); |
16171 | } |
16172 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16173 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
16178 | } else { |
16179 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16180 | at::AutoDispatchSkipFunctionalize guard; |
16181 | at::_ops::_foreach_tan_out::call(self_, out_); |
16182 | ; |
16183 | } |
16184 | } else { |
16185 | ::std::vector<at::Tensor> tmp_output; |
16186 | { |
16187 | at::AutoDispatchSkipFunctionalize guard; |
16188 | tmp_output = at::_ops::_foreach_tan::call(self_); |
16189 | } |
16190 | at::functionalization::impl::replace_(out, tmp_output); |
16191 | at::functionalization::impl::commit_update(out); |
16192 | at::functionalization::impl::sync(out); |
16193 | |
16194 | } |
16195 | } |
16196 | |
16197 | void _foreach_tan_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { |
16198 | if (true) { |
16199 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16200 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16202 | auto self_meta = to_meta(self); |
16203 | at::AutoDispatchSkipFunctionalize func_guard; |
16204 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16205 | at::_ops::_foreach_tan_::call(self_meta); |
16206 | } |
16207 | |
16208 | ::std::vector<at::Tensor> self_; |
16209 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16210 | at::functionalization::impl::sync(self); |
16211 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16212 | } else { |
16213 | self_ = self.vec(); |
16214 | } |
16215 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
16216 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
16221 | } else { |
16222 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16223 | at::AutoDispatchSkipFunctionalize guard; |
16224 | at::_ops::_foreach_tan_::call(self_); |
16225 | ; |
16226 | } |
16227 | } else { |
16228 | ::std::vector<at::Tensor> tmp_output; |
16229 | { |
16230 | at::AutoDispatchSkipFunctionalize guard; |
16231 | tmp_output = at::_ops::_foreach_tan::call(self_); |
16232 | } |
16233 | at::functionalization::impl::replace_(self, tmp_output); |
16234 | at::functionalization::impl::commit_update(self); |
16235 | at::functionalization::impl::sync(self); |
16236 | |
16237 | } |
16238 | } |
16239 | |
16240 | void _foreach_sigmoid_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { |
16241 | if (false) { |
16242 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16243 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16245 | auto self_meta = to_meta(self); |
16246 | auto out_meta = to_meta(out); |
16247 | at::AutoDispatchSkipFunctionalize func_guard; |
16248 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16249 | at::_ops::_foreach_sigmoid_out::call(self_meta, out_meta); |
16250 | } |
16251 | |
16252 | ::std::vector<at::Tensor> self_; |
16253 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16254 | at::functionalization::impl::sync(self); |
16255 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16256 | } else { |
16257 | self_ = self.vec(); |
16258 | } |
16259 | |
16260 | ::std::vector<at::Tensor> out_; |
16261 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16262 | at::functionalization::impl::sync(out); |
16263 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16264 | } else { |
16265 | out_ = out.vec(); |
16266 | } |
16267 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16268 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
16273 | } else { |
16274 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16275 | at::AutoDispatchSkipFunctionalize guard; |
16276 | at::_ops::_foreach_sigmoid_out::call(self_, out_); |
16277 | ; |
16278 | } |
16279 | } else { |
16280 | ::std::vector<at::Tensor> tmp_output; |
16281 | { |
16282 | at::AutoDispatchSkipFunctionalize guard; |
16283 | tmp_output = at::_ops::_foreach_sigmoid::call(self_); |
16284 | } |
16285 | at::functionalization::impl::replace_(out, tmp_output); |
16286 | at::functionalization::impl::commit_update(out); |
16287 | at::functionalization::impl::sync(out); |
16288 | |
16289 | } |
16290 | } |
16291 | |
16292 | void _foreach_sigmoid_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { |
16293 | if (true) { |
16294 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16295 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16297 | auto self_meta = to_meta(self); |
16298 | at::AutoDispatchSkipFunctionalize func_guard; |
16299 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16300 | at::_ops::_foreach_sigmoid_::call(self_meta); |
16301 | } |
16302 | |
16303 | ::std::vector<at::Tensor> self_; |
16304 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16305 | at::functionalization::impl::sync(self); |
16306 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16307 | } else { |
16308 | self_ = self.vec(); |
16309 | } |
16310 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
16311 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
16316 | } else { |
16317 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16318 | at::AutoDispatchSkipFunctionalize guard; |
16319 | at::_ops::_foreach_sigmoid_::call(self_); |
16320 | ; |
16321 | } |
16322 | } else { |
16323 | ::std::vector<at::Tensor> tmp_output; |
16324 | { |
16325 | at::AutoDispatchSkipFunctionalize guard; |
16326 | tmp_output = at::_ops::_foreach_sigmoid::call(self_); |
16327 | } |
16328 | at::functionalization::impl::replace_(self, tmp_output); |
16329 | at::functionalization::impl::commit_update(self); |
16330 | at::functionalization::impl::sync(self); |
16331 | |
16332 | } |
16333 | } |
16334 | |
16335 | void _foreach_norm_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & ord, at::TensorList out) { |
16336 | if (false) { |
16337 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16338 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16340 | auto self_meta = to_meta(self); |
16341 | auto out_meta = to_meta(out); |
16342 | at::AutoDispatchSkipFunctionalize func_guard; |
16343 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16344 | at::_ops::_foreach_norm_Scalar_out::call(self_meta, ord, out_meta); |
16345 | } |
16346 | |
16347 | ::std::vector<at::Tensor> self_; |
16348 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16349 | at::functionalization::impl::sync(self); |
16350 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16351 | } else { |
16352 | self_ = self.vec(); |
16353 | } |
16354 | |
16355 | ::std::vector<at::Tensor> out_; |
16356 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16357 | at::functionalization::impl::sync(out); |
16358 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16359 | } else { |
16360 | out_ = out.vec(); |
16361 | } |
16362 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16363 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
16368 | } else { |
16369 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16370 | at::AutoDispatchSkipFunctionalize guard; |
16371 | at::_ops::_foreach_norm_Scalar_out::call(self_, ord, out_); |
16372 | ; |
16373 | } |
16374 | } else { |
16375 | ::std::vector<at::Tensor> tmp_output; |
16376 | { |
16377 | at::AutoDispatchSkipFunctionalize guard; |
16378 | tmp_output = at::_ops::_foreach_norm_Scalar::call(self_, ord); |
16379 | } |
16380 | at::functionalization::impl::replace_(out, tmp_output); |
16381 | at::functionalization::impl::commit_update(out); |
16382 | at::functionalization::impl::sync(out); |
16383 | |
16384 | } |
16385 | } |
16386 | |
16387 | at::Tensor & searchsorted_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter, at::Tensor & out) { |
16388 | if (false) { |
16389 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16390 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16392 | auto sorted_sequence_meta = to_meta(sorted_sequence); |
16393 | auto self_meta = to_meta(self); |
16394 | auto sorter_meta = to_meta(sorter); |
16395 | auto out_meta = to_meta(out); |
16396 | at::AutoDispatchSkipFunctionalize func_guard; |
16397 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16398 | at::_ops::searchsorted_Tensor_out::call(sorted_sequence_meta, self_meta, out_int32, right, side, sorter_meta, out_meta); |
16399 | } |
16400 | |
16401 | at::Tensor sorted_sequence_; |
16402 | if (at::functionalization::impl::isFunctionalTensor(sorted_sequence)) { |
16403 | at::functionalization::impl::sync(sorted_sequence); |
16404 | sorted_sequence_ = at::functionalization::impl::from_functional_tensor(sorted_sequence); |
16405 | } else { |
16406 | sorted_sequence_ = sorted_sequence; |
16407 | } |
16408 | |
16409 | at::Tensor self_; |
16410 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16411 | at::functionalization::impl::sync(self); |
16412 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16413 | } else { |
16414 | self_ = self; |
16415 | } |
16416 | |
16417 | c10::optional<at::Tensor> sorter_; |
16418 | if (at::functionalization::impl::isFunctionalTensor(sorter)) { |
16419 | at::functionalization::impl::sync(sorter); |
16420 | sorter_ = at::functionalization::impl::from_functional_tensor(sorter); |
16421 | } else { |
16422 | sorter_ = sorter; |
16423 | } |
16424 | |
16425 | at::Tensor out_; |
16426 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16427 | at::functionalization::impl::sync(out); |
16428 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16429 | } else { |
16430 | out_ = out; |
16431 | } |
16432 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16433 | if ((false || at::functionalization::impl::isFunctionalTensor(sorted_sequence) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(sorter))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
16438 | } else { |
16439 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16440 | at::AutoDispatchSkipFunctionalize guard; |
16441 | at::Tensor tmp_output = at::_ops::searchsorted_Tensor_out::call(sorted_sequence_, self_, out_int32, right, side, sorter_, out_); |
      return out;
16443 | } |
16444 | } else { |
16445 | at::Tensor tmp_output; |
16446 | { |
16447 | at::AutoDispatchSkipFunctionalize guard; |
16448 | tmp_output = at::_ops::searchsorted_Tensor::call(sorted_sequence_, self_, out_int32, right, side, sorter_); |
16449 | } |
16450 | at::functionalization::impl::replace_(out, tmp_output); |
16451 | at::functionalization::impl::commit_update(out); |
16452 | at::functionalization::impl::sync(out); |
16453 | return out; |
16454 | } |
16455 | } |
16456 | |
16457 | at::Tensor & searchsorted_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter, at::Tensor & out) { |
16458 | if (false) { |
16459 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16460 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16462 | auto sorted_sequence_meta = to_meta(sorted_sequence); |
16463 | auto sorter_meta = to_meta(sorter); |
16464 | auto out_meta = to_meta(out); |
16465 | at::AutoDispatchSkipFunctionalize func_guard; |
16466 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16467 | at::_ops::searchsorted_Scalar_out::call(sorted_sequence_meta, self, out_int32, right, side, sorter_meta, out_meta); |
16468 | } |
16469 | |
16470 | at::Tensor sorted_sequence_; |
16471 | if (at::functionalization::impl::isFunctionalTensor(sorted_sequence)) { |
16472 | at::functionalization::impl::sync(sorted_sequence); |
16473 | sorted_sequence_ = at::functionalization::impl::from_functional_tensor(sorted_sequence); |
16474 | } else { |
16475 | sorted_sequence_ = sorted_sequence; |
16476 | } |
16477 | |
16478 | c10::optional<at::Tensor> sorter_; |
16479 | if (at::functionalization::impl::isFunctionalTensor(sorter)) { |
16480 | at::functionalization::impl::sync(sorter); |
16481 | sorter_ = at::functionalization::impl::from_functional_tensor(sorter); |
16482 | } else { |
16483 | sorter_ = sorter; |
16484 | } |
16485 | |
16486 | at::Tensor out_; |
16487 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16488 | at::functionalization::impl::sync(out); |
16489 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16490 | } else { |
16491 | out_ = out; |
16492 | } |
16493 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16494 | if ((false || at::functionalization::impl::isFunctionalTensor(sorted_sequence) || at::functionalization::impl::isFunctionalTensor(sorter))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
16499 | } else { |
16500 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16501 | at::AutoDispatchSkipFunctionalize guard; |
16502 | at::Tensor tmp_output = at::_ops::searchsorted_Scalar_out::call(sorted_sequence_, self, out_int32, right, side, sorter_, out_); |
      return out;
16504 | } |
16505 | } else { |
16506 | at::Tensor tmp_output; |
16507 | { |
16508 | at::AutoDispatchSkipFunctionalize guard; |
16509 | tmp_output = at::_ops::searchsorted_Scalar::call(sorted_sequence_, self, out_int32, right, side, sorter_); |
16510 | } |
16511 | at::functionalization::impl::replace_(out, tmp_output); |
16512 | at::functionalization::impl::commit_update(out); |
16513 | at::functionalization::impl::sync(out); |
16514 | return out; |
16515 | } |
16516 | } |
16517 | |
16518 | at::Tensor & mse_loss_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input) { |
16519 | if (false) { |
16520 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16521 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16523 | auto grad_output_meta = to_meta(grad_output); |
16524 | auto self_meta = to_meta(self); |
16525 | auto target_meta = to_meta(target); |
16526 | auto grad_input_meta = to_meta(grad_input); |
16527 | at::AutoDispatchSkipFunctionalize func_guard; |
16528 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16529 | at::_ops::mse_loss_backward_grad_input::call(grad_output_meta, self_meta, target_meta, reduction, grad_input_meta); |
16530 | } |
16531 | |
16532 | at::Tensor grad_output_; |
16533 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
16534 | at::functionalization::impl::sync(grad_output); |
16535 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
16536 | } else { |
16537 | grad_output_ = grad_output; |
16538 | } |
16539 | |
16540 | at::Tensor self_; |
16541 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16542 | at::functionalization::impl::sync(self); |
16543 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16544 | } else { |
16545 | self_ = self; |
16546 | } |
16547 | |
16548 | at::Tensor target_; |
16549 | if (at::functionalization::impl::isFunctionalTensor(target)) { |
16550 | at::functionalization::impl::sync(target); |
16551 | target_ = at::functionalization::impl::from_functional_tensor(target); |
16552 | } else { |
16553 | target_ = target; |
16554 | } |
16555 | |
16556 | at::Tensor grad_input_; |
16557 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
16558 | at::functionalization::impl::sync(grad_input); |
16559 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
16560 | } else { |
16561 | grad_input_ = grad_input; |
16562 | } |
16563 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
16564 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
16569 | } else { |
16570 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16571 | at::AutoDispatchSkipFunctionalize guard; |
16572 | at::Tensor tmp_output = at::_ops::mse_loss_backward_grad_input::call(grad_output_, self_, target_, reduction, grad_input_); |
      return grad_input;
16574 | } |
16575 | } else { |
16576 | at::Tensor tmp_output; |
16577 | { |
16578 | at::AutoDispatchSkipFunctionalize guard; |
16579 | tmp_output = at::_ops::mse_loss_backward::call(grad_output_, self_, target_, reduction); |
16580 | } |
16581 | at::functionalization::impl::replace_(grad_input, tmp_output); |
16582 | at::functionalization::impl::commit_update(grad_input); |
16583 | at::functionalization::impl::sync(grad_input); |
16584 | return grad_input; |
16585 | } |
16586 | } |
16587 | |
16588 | at::Tensor & smooth_l1_loss_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta, at::Tensor & grad_input) { |
16589 | if (false) { |
16590 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16591 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16593 | auto grad_output_meta = to_meta(grad_output); |
16594 | auto self_meta = to_meta(self); |
16595 | auto target_meta = to_meta(target); |
16596 | auto grad_input_meta = to_meta(grad_input); |
16597 | at::AutoDispatchSkipFunctionalize func_guard; |
16598 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16599 | at::_ops::smooth_l1_loss_backward_grad_input::call(grad_output_meta, self_meta, target_meta, reduction, beta, grad_input_meta); |
16600 | } |
16601 | |
16602 | at::Tensor grad_output_; |
16603 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
16604 | at::functionalization::impl::sync(grad_output); |
16605 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
16606 | } else { |
16607 | grad_output_ = grad_output; |
16608 | } |
16609 | |
16610 | at::Tensor self_; |
16611 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16612 | at::functionalization::impl::sync(self); |
16613 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16614 | } else { |
16615 | self_ = self; |
16616 | } |
16617 | |
16618 | at::Tensor target_; |
16619 | if (at::functionalization::impl::isFunctionalTensor(target)) { |
16620 | at::functionalization::impl::sync(target); |
16621 | target_ = at::functionalization::impl::from_functional_tensor(target); |
16622 | } else { |
16623 | target_ = target; |
16624 | } |
16625 | |
16626 | at::Tensor grad_input_; |
16627 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
16628 | at::functionalization::impl::sync(grad_input); |
16629 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
16630 | } else { |
16631 | grad_input_ = grad_input; |
16632 | } |
16633 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
16634 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
16639 | } else { |
16640 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16641 | at::AutoDispatchSkipFunctionalize guard; |
16642 | at::Tensor tmp_output = at::_ops::smooth_l1_loss_backward_grad_input::call(grad_output_, self_, target_, reduction, beta, grad_input_); |
      return grad_input;
16644 | } |
16645 | } else { |
16646 | at::Tensor tmp_output; |
16647 | { |
16648 | at::AutoDispatchSkipFunctionalize guard; |
16649 | tmp_output = at::_ops::smooth_l1_loss_backward::call(grad_output_, self_, target_, reduction, beta); |
16650 | } |
16651 | at::functionalization::impl::replace_(grad_input, tmp_output); |
16652 | at::functionalization::impl::commit_update(grad_input); |
16653 | at::functionalization::impl::sync(grad_input); |
16654 | return grad_input; |
16655 | } |
16656 | } |
16657 | |
16658 | at::Tensor & huber_loss_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & grad_input) { |
16659 | if (false) { |
16660 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16661 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16663 | auto grad_output_meta = to_meta(grad_output); |
16664 | auto self_meta = to_meta(self); |
16665 | auto target_meta = to_meta(target); |
16666 | auto grad_input_meta = to_meta(grad_input); |
16667 | at::AutoDispatchSkipFunctionalize func_guard; |
16668 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16669 | at::_ops::huber_loss_backward_out::call(grad_output_meta, self_meta, target_meta, reduction, delta, grad_input_meta); |
16670 | } |
16671 | |
16672 | at::Tensor grad_output_; |
16673 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
16674 | at::functionalization::impl::sync(grad_output); |
16675 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
16676 | } else { |
16677 | grad_output_ = grad_output; |
16678 | } |
16679 | |
16680 | at::Tensor self_; |
16681 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16682 | at::functionalization::impl::sync(self); |
16683 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16684 | } else { |
16685 | self_ = self; |
16686 | } |
16687 | |
16688 | at::Tensor target_; |
16689 | if (at::functionalization::impl::isFunctionalTensor(target)) { |
16690 | at::functionalization::impl::sync(target); |
16691 | target_ = at::functionalization::impl::from_functional_tensor(target); |
16692 | } else { |
16693 | target_ = target; |
16694 | } |
16695 | |
16696 | at::Tensor grad_input_; |
16697 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
16698 | at::functionalization::impl::sync(grad_input); |
16699 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
16700 | } else { |
16701 | grad_input_ = grad_input; |
16702 | } |
16703 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
16704 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
16709 | } else { |
16710 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16711 | at::AutoDispatchSkipFunctionalize guard; |
16712 | at::Tensor tmp_output = at::_ops::huber_loss_backward_out::call(grad_output_, self_, target_, reduction, delta, grad_input_); |
      return grad_input;
16714 | } |
16715 | } else { |
16716 | at::Tensor tmp_output; |
16717 | { |
16718 | at::AutoDispatchSkipFunctionalize guard; |
16719 | tmp_output = at::_ops::huber_loss_backward::call(grad_output_, self_, target_, reduction, delta); |
16720 | } |
16721 | at::functionalization::impl::replace_(grad_input, tmp_output); |
16722 | at::functionalization::impl::commit_update(grad_input); |
16723 | at::functionalization::impl::sync(grad_input); |
16724 | return grad_input; |
16725 | } |
16726 | } |
16727 | |
16728 | at::Tensor & elu_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result, at::Tensor & grad_input) { |
16729 | if (false) { |
16730 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16731 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16733 | auto grad_output_meta = to_meta(grad_output); |
16734 | auto self_or_result_meta = to_meta(self_or_result); |
16735 | auto grad_input_meta = to_meta(grad_input); |
16736 | at::AutoDispatchSkipFunctionalize func_guard; |
16737 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16738 | at::_ops::elu_backward_grad_input::call(grad_output_meta, alpha, scale, input_scale, is_result, self_or_result_meta, grad_input_meta); |
16739 | } |
16740 | |
16741 | at::Tensor grad_output_; |
16742 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
16743 | at::functionalization::impl::sync(grad_output); |
16744 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
16745 | } else { |
16746 | grad_output_ = grad_output; |
16747 | } |
16748 | |
16749 | at::Tensor self_or_result_; |
16750 | if (at::functionalization::impl::isFunctionalTensor(self_or_result)) { |
16751 | at::functionalization::impl::sync(self_or_result); |
16752 | self_or_result_ = at::functionalization::impl::from_functional_tensor(self_or_result); |
16753 | } else { |
16754 | self_or_result_ = self_or_result; |
16755 | } |
16756 | |
16757 | at::Tensor grad_input_; |
16758 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
16759 | at::functionalization::impl::sync(grad_input); |
16760 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
16761 | } else { |
16762 | grad_input_ = grad_input; |
16763 | } |
16764 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
16765 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self_or_result))) { |
16766 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
16767 | TORCH_INTERNAL_ASSERT(false, |
16768 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
16769 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
16770 | } else { |
16771 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16772 | at::AutoDispatchSkipFunctionalize guard; |
16773 | at::Tensor tmp_output = at::_ops::elu_backward_grad_input::call(grad_output_, alpha, scale, input_scale, is_result, self_or_result_, grad_input_); |
16774 | return grad_input; |
16775 | } |
16776 | } else { |
16777 | at::Tensor tmp_output; |
16778 | { |
16779 | at::AutoDispatchSkipFunctionalize guard; |
16780 | tmp_output = at::_ops::elu_backward::call(grad_output_, alpha, scale, input_scale, is_result, self_or_result_); |
16781 | } |
16782 | at::functionalization::impl::replace_(grad_input, tmp_output); |
16783 | at::functionalization::impl::commit_update(grad_input); |
16784 | at::functionalization::impl::sync(grad_input); |
16785 | return grad_input; |
16786 | } |
16787 | } |
16788 | |
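// The meta-tensor replay above is emitted as `if (false)` for out= kernels
// like elu_backward.grad_input: per the generated comment, only genuinely
// inplace ops are guaranteed to support meta tensors today, so the precheck
// is compiled out here and only enabled (`if (true)`) for inplace kernels
// such as rrelu_with_noise_ further down in this file.
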
16789 | at::Tensor & glu_jvp_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim, at::Tensor & out) { |
16790 | if (false) { |
16791 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16792 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16793 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16794 | auto glu_meta = to_meta(glu); |
16795 | auto x_meta = to_meta(x); |
16796 | auto dx_meta = to_meta(dx); |
16797 | auto out_meta = to_meta(out); |
16798 | at::AutoDispatchSkipFunctionalize func_guard; |
16799 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16800 | at::_ops::glu_jvp_out::call(glu_meta, x_meta, dx_meta, dim, out_meta); |
16801 | } |
16802 | |
16803 | at::Tensor glu_; |
16804 | if (at::functionalization::impl::isFunctionalTensor(glu)) { |
16805 | at::functionalization::impl::sync(glu); |
16806 | glu_ = at::functionalization::impl::from_functional_tensor(glu); |
16807 | } else { |
16808 | glu_ = glu; |
16809 | } |
16810 | |
16811 | at::Tensor x_; |
16812 | if (at::functionalization::impl::isFunctionalTensor(x)) { |
16813 | at::functionalization::impl::sync(x); |
16814 | x_ = at::functionalization::impl::from_functional_tensor(x); |
16815 | } else { |
16816 | x_ = x; |
16817 | } |
16818 | |
16819 | at::Tensor dx_; |
16820 | if (at::functionalization::impl::isFunctionalTensor(dx)) { |
16821 | at::functionalization::impl::sync(dx); |
16822 | dx_ = at::functionalization::impl::from_functional_tensor(dx); |
16823 | } else { |
16824 | dx_ = dx; |
16825 | } |
16826 | |
16827 | at::Tensor out_; |
16828 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16829 | at::functionalization::impl::sync(out); |
16830 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16831 | } else { |
16832 | out_ = out; |
16833 | } |
16834 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16835 | if ((false || at::functionalization::impl::isFunctionalTensor(glu) || at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(dx))) { |
16836 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
16837 | TORCH_INTERNAL_ASSERT(false, |
16838 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
16839 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
16840 | } else { |
16841 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16842 | at::AutoDispatchSkipFunctionalize guard; |
16843 | at::Tensor tmp_output = at::_ops::glu_jvp_out::call(glu_, x_, dx_, dim, out_); |
16844 | return out; |
16845 | } |
16846 | } else { |
16847 | at::Tensor tmp_output; |
16848 | { |
16849 | at::AutoDispatchSkipFunctionalize guard; |
16850 | tmp_output = at::_ops::glu_jvp::call(glu_, x_, dx_, dim); |
16851 | } |
16852 | at::functionalization::impl::replace_(out, tmp_output); |
16853 | at::functionalization::impl::commit_update(out); |
16854 | at::functionalization::impl::sync(out); |
16855 | return out; |
16856 | } |
16857 | } |
16858 | |
16859 | at::Tensor & hardsigmoid_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) { |
16860 | if (false) { |
16861 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16862 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16863 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16864 | auto grad_output_meta = to_meta(grad_output); |
16865 | auto self_meta = to_meta(self); |
16866 | auto grad_input_meta = to_meta(grad_input); |
16867 | at::AutoDispatchSkipFunctionalize func_guard; |
16868 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16869 | at::_ops::hardsigmoid_backward_grad_input::call(grad_output_meta, self_meta, grad_input_meta); |
16870 | } |
16871 | |
16872 | at::Tensor grad_output_; |
16873 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
16874 | at::functionalization::impl::sync(grad_output); |
16875 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
16876 | } else { |
16877 | grad_output_ = grad_output; |
16878 | } |
16879 | |
16880 | at::Tensor self_; |
16881 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16882 | at::functionalization::impl::sync(self); |
16883 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16884 | } else { |
16885 | self_ = self; |
16886 | } |
16887 | |
16888 | at::Tensor grad_input_; |
16889 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
16890 | at::functionalization::impl::sync(grad_input); |
16891 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
16892 | } else { |
16893 | grad_input_ = grad_input; |
16894 | } |
16895 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
16896 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) { |
16897 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
16898 | TORCH_INTERNAL_ASSERT(false, |
16899 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
16900 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
16901 | } else { |
16902 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16903 | at::AutoDispatchSkipFunctionalize guard; |
16904 | at::Tensor tmp_output = at::_ops::hardsigmoid_backward_grad_input::call(grad_output_, self_, grad_input_); |
16905 | return grad_input; |
16906 | } |
16907 | } else { |
16908 | at::Tensor tmp_output; |
16909 | { |
16910 | at::AutoDispatchSkipFunctionalize guard; |
16911 | tmp_output = at::_ops::hardsigmoid_backward::call(grad_output_, self_); |
16912 | } |
16913 | at::functionalization::impl::replace_(grad_input, tmp_output); |
16914 | at::functionalization::impl::commit_update(grad_input); |
16915 | at::functionalization::impl::sync(grad_input); |
16916 | return grad_input; |
16917 | } |
16918 | } |
16919 | |
16920 | at::Tensor & log_sigmoid_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
16921 | if (false) { |
16922 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16923 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16924 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16925 | auto self_meta = to_meta(self); |
16926 | auto out_meta = to_meta(out); |
16927 | at::AutoDispatchSkipFunctionalize func_guard; |
16928 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16929 | at::_ops::log_sigmoid_out::call(self_meta, out_meta); |
16930 | } |
16931 | |
16932 | at::Tensor self_; |
16933 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16934 | at::functionalization::impl::sync(self); |
16935 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16936 | } else { |
16937 | self_ = self; |
16938 | } |
16939 | |
16940 | at::Tensor out_; |
16941 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16942 | at::functionalization::impl::sync(out); |
16943 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16944 | } else { |
16945 | out_ = out; |
16946 | } |
16947 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16948 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
16949 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
16950 | TORCH_INTERNAL_ASSERT(false, |
16951 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
16952 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
16953 | } else { |
16954 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16955 | at::AutoDispatchSkipFunctionalize guard; |
16956 | at::Tensor tmp_output = at::_ops::log_sigmoid_out::call(self_, out_); |
16957 | return out; |
16958 | } |
16959 | } else { |
16960 | at::Tensor tmp_output; |
16961 | { |
16962 | at::AutoDispatchSkipFunctionalize guard; |
16963 | tmp_output = at::_ops::log_sigmoid::call(self_); |
16964 | } |
16965 | at::functionalization::impl::replace_(out, tmp_output); |
16966 | at::functionalization::impl::commit_update(out); |
16967 | at::functionalization::impl::sync(out); |
16968 | return out; |
16969 | } |
16970 | } |
16971 | |
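// In the non-functional fallback (case 2) above, `tmp_output` is deliberately
// discarded: the redispatched log_sigmoid.out call has already written into
// `out_` in place, and the kernel returns the caller's original `out`
// reference so the out-overload's aliasing contract is preserved.
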
16972 | ::std::tuple<at::Tensor &,at::Tensor &> log_sigmoid_forward_out_output(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & output, at::Tensor & buffer) { |
16973 | if (false) { |
16974 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16975 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16976 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16977 | auto self_meta = to_meta(self); |
16978 | auto output_meta = to_meta(output); |
16979 | auto buffer_meta = to_meta(buffer); |
16980 | at::AutoDispatchSkipFunctionalize func_guard; |
16981 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16982 | at::_ops::log_sigmoid_forward_output::call(self_meta, output_meta, buffer_meta); |
16983 | } |
16984 | |
16985 | at::Tensor self_; |
16986 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16987 | at::functionalization::impl::sync(self); |
16988 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16989 | } else { |
16990 | self_ = self; |
16991 | } |
16992 | |
16993 | at::Tensor output_; |
16994 | if (at::functionalization::impl::isFunctionalTensor(output)) { |
16995 | at::functionalization::impl::sync(output); |
16996 | output_ = at::functionalization::impl::from_functional_tensor(output); |
16997 | } else { |
16998 | output_ = output; |
16999 | } |
17000 | |
17001 | at::Tensor buffer_; |
17002 | if (at::functionalization::impl::isFunctionalTensor(buffer)) { |
17003 | at::functionalization::impl::sync(buffer); |
17004 | buffer_ = at::functionalization::impl::from_functional_tensor(buffer); |
17005 | } else { |
17006 | buffer_ = buffer; |
17007 | } |
17008 | if (!(true && at::functionalization::impl::isFunctionalTensor(output) && at::functionalization::impl::isFunctionalTensor(buffer))) { |
17009 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
17010 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17011 | TORCH_INTERNAL_ASSERT(false, |
17012 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
17013 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
17014 | } else { |
17015 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17016 | at::AutoDispatchSkipFunctionalize guard; |
17017 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::log_sigmoid_forward_output::call(self_, output_, buffer_); |
17018 | return ::std::tuple<at::Tensor &,at::Tensor &>(output, buffer); |
17019 | } |
17020 | } else { |
17021 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
17022 | { |
17023 | at::AutoDispatchSkipFunctionalize guard; |
17024 | tmp_output = at::_ops::log_sigmoid_forward::call(self_); |
17025 | } |
17026 | at::functionalization::impl::replace_(output, std::get<0>(tmp_output)); |
17027 | at::functionalization::impl::commit_update(output); |
17028 | at::functionalization::impl::sync(output); |
17029 | at::functionalization::impl::replace_(buffer, std::get<1>(tmp_output)); |
17030 | at::functionalization::impl::commit_update(buffer); |
17031 | at::functionalization::impl::sync(buffer); |
17032 | return ::std::tuple<at::Tensor &,at::Tensor &>(output, buffer); |
17033 | } |
17034 | } |
17035 | |
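// For ops with more than one mutable output, such as log_sigmoid_forward.output
// above (which writes both `output` and `buffer`), the write-back step simply
// repeats per output: each element of the functional result tuple is spliced
// into its wrapper with replace_(), the mutation is (roughly speaking)
// recorded against the wrapper's base storage with commit_update(), and
// sync() brings the wrapper up to date before the references are returned.
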
17036 | at::Tensor & rrelu_with_noise_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator, at::Tensor & out) { |
17037 | if (false) { |
17038 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17039 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17040 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17041 | auto self_meta = to_meta(self); |
17042 | auto noise_meta = to_meta(noise); |
17043 | auto out_meta = to_meta(out); |
17044 | at::AutoDispatchSkipFunctionalize func_guard; |
17045 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17046 | at::_ops::rrelu_with_noise_out::call(self_meta, noise_meta, lower, upper, training, generator, out_meta); |
17047 | } |
17048 | |
17049 | at::Tensor self_; |
17050 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17051 | at::functionalization::impl::sync(self); |
17052 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17053 | } else { |
17054 | self_ = self; |
17055 | } |
17056 | |
17057 | at::Tensor noise_; |
17058 | if (at::functionalization::impl::isFunctionalTensor(noise)) { |
17059 | at::functionalization::impl::sync(noise); |
17060 | noise_ = at::functionalization::impl::from_functional_tensor(noise); |
17061 | } else { |
17062 | noise_ = noise; |
17063 | } |
17064 | |
17065 | at::Tensor out_; |
17066 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
17067 | at::functionalization::impl::sync(out); |
17068 | out_ = at::functionalization::impl::from_functional_tensor(out); |
17069 | } else { |
17070 | out_ = out; |
17071 | } |
17072 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
17073 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(noise))) { |
17074 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17075 | TORCH_INTERNAL_ASSERT(false, |
17076 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
17077 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
17078 | } else { |
17079 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17080 | at::AutoDispatchSkipFunctionalize guard; |
17081 | at::Tensor tmp_output = at::_ops::rrelu_with_noise_out::call(self_, noise_, lower, upper, training, generator, out_); |
17082 | return out; |
17083 | } |
17084 | } else { |
17085 | at::Tensor tmp_output; |
17086 | { |
17087 | at::AutoDispatchSkipFunctionalize guard; |
17088 | tmp_output = at::_ops::rrelu_with_noise::call(self_, noise_, lower, upper, training, generator); |
17089 | } |
17090 | at::functionalization::impl::replace_(out, tmp_output); |
17091 | at::functionalization::impl::commit_update(out); |
17092 | at::functionalization::impl::sync(out); |
17093 | return out; |
17094 | } |
17095 | } |
17096 | |
17097 | at::Tensor & rrelu_with_noise_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) { |
17098 | if (true) { |
17099 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17100 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17101 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17102 | auto self_meta = to_meta(self); |
17103 | auto noise_meta = to_meta(noise); |
17104 | at::AutoDispatchSkipFunctionalize func_guard; |
17105 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17106 | at::_ops::rrelu_with_noise_::call(self_meta, noise_meta, lower, upper, training, generator); |
17107 | } |
17108 | |
17109 | at::Tensor self_; |
17110 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17111 | at::functionalization::impl::sync(self); |
17112 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17113 | } else { |
17114 | self_ = self; |
17115 | } |
17116 | |
17117 | at::Tensor noise_; |
17118 | if (at::functionalization::impl::isFunctionalTensor(noise)) { |
17119 | at::functionalization::impl::sync(noise); |
17120 | noise_ = at::functionalization::impl::from_functional_tensor(noise); |
17121 | } else { |
17122 | noise_ = noise; |
17123 | } |
17124 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
17125 | if ((false || at::functionalization::impl::isFunctionalTensor(noise))) { |
17126 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17127 | TORCH_INTERNAL_ASSERT(false, |
17128 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
17129 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
17130 | } else { |
17131 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17132 | at::AutoDispatchSkipFunctionalize guard; |
17133 | at::Tensor tmp_output = at::_ops::rrelu_with_noise_::call(self_, noise_, lower, upper, training, generator); |
17134 | return self; |
17135 | } |
17136 | } else { |
17137 | at::Tensor tmp_output; |
17138 | { |
17139 | at::AutoDispatchSkipFunctionalize guard; |
17140 | tmp_output = at::_ops::rrelu_with_noise::call(self_, noise_, lower, upper, training, generator); |
17141 | } |
17142 | at::functionalization::impl::replace_(self, tmp_output); |
17143 | at::functionalization::impl::commit_update(self); |
17144 | at::functionalization::impl::sync(self); |
17145 | return self; |
17146 | } |
17147 | } |
17148 | |
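// Illustrative usage of the inplace kernel above (a hand-written sketch, not
// part of the generated file; it assumes the wrapper helpers declared in
// FunctionalTensorWrapper.h behave as they are used throughout this file):
//
//   at::Tensor self  = at::randn({8});
//   at::Tensor noise = at::empty_like(self);
//   // Wrap `self` so it carries the Functionalize dispatch key:
//   at::Tensor f_self = at::functionalization::impl::to_functional_tensor(self);
//   // Dispatches to rrelu_with_noise_ above; the mutation lands in the
//   // wrapper's value rather than in `self`'s original storage:
//   at::rrelu_with_noise_(f_self, noise, 0.125, 0.3333, /*training=*/true);
//   // Unwrap to observe the updated value:
//   at::Tensor result =
//       at::functionalization::impl::from_functional_tensor(f_self);
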
17149 | at::Tensor & rrelu_with_noise_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result, at::Tensor & out) { |
17150 | if (false) { |
17151 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17152 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17153 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17154 | auto grad_output_meta = to_meta(grad_output); |
17155 | auto self_meta = to_meta(self); |
17156 | auto noise_meta = to_meta(noise); |
17157 | auto out_meta = to_meta(out); |
17158 | at::AutoDispatchSkipFunctionalize func_guard; |
17159 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17160 | at::_ops::rrelu_with_noise_backward_out::call(grad_output_meta, self_meta, noise_meta, lower, upper, training, self_is_result, out_meta); |
17161 | } |
17162 | |
17163 | at::Tensor grad_output_; |
17164 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
17165 | at::functionalization::impl::sync(grad_output); |
17166 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
17167 | } else { |
17168 | grad_output_ = grad_output; |
17169 | } |
17170 | |
17171 | at::Tensor self_; |
17172 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17173 | at::functionalization::impl::sync(self); |
17174 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17175 | } else { |
17176 | self_ = self; |
17177 | } |
17178 | |
17179 | at::Tensor noise_; |
17180 | if (at::functionalization::impl::isFunctionalTensor(noise)) { |
17181 | at::functionalization::impl::sync(noise); |
17182 | noise_ = at::functionalization::impl::from_functional_tensor(noise); |
17183 | } else { |
17184 | noise_ = noise; |
17185 | } |
17186 | |
17187 | at::Tensor out_; |
17188 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
17189 | at::functionalization::impl::sync(out); |
17190 | out_ = at::functionalization::impl::from_functional_tensor(out); |
17191 | } else { |
17192 | out_ = out; |
17193 | } |
17194 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
17195 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(noise))) { |
17196 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17197 | TORCH_INTERNAL_ASSERT(false, |
17198 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
17199 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
17200 | } else { |
17201 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17202 | at::AutoDispatchSkipFunctionalize guard; |
17203 | at::Tensor tmp_output = at::_ops::rrelu_with_noise_backward_out::call(grad_output_, self_, noise_, lower, upper, training, self_is_result, out_); |
17204 | return out; |
17205 | } |
17206 | } else { |
17207 | at::Tensor tmp_output; |
17208 | { |
17209 | at::AutoDispatchSkipFunctionalize guard; |
17210 | tmp_output = at::_ops::rrelu_with_noise_backward::call(grad_output_, self_, noise_, lower, upper, training, self_is_result); |
17211 | } |
17212 | at::functionalization::impl::replace_(out, tmp_output); |
17213 | at::functionalization::impl::commit_update(out); |
17214 | at::functionalization::impl::sync(out); |
17215 | return out; |
17216 | } |
17217 | } |
17218 | |
17219 | at::Tensor & softplus_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & grad_input) { |
17220 | if (false) { |
17221 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17222 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17223 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17224 | auto grad_output_meta = to_meta(grad_output); |
17225 | auto self_meta = to_meta(self); |
17226 | auto grad_input_meta = to_meta(grad_input); |
17227 | at::AutoDispatchSkipFunctionalize func_guard; |
17228 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17229 | at::_ops::softplus_backward_grad_input::call(grad_output_meta, self_meta, beta, threshold, grad_input_meta); |
17230 | } |
17231 | |
17232 | at::Tensor grad_output_; |
17233 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
17234 | at::functionalization::impl::sync(grad_output); |
17235 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
17236 | } else { |
17237 | grad_output_ = grad_output; |
17238 | } |
17239 | |
17240 | at::Tensor self_; |
17241 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17242 | at::functionalization::impl::sync(self); |
17243 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17244 | } else { |
17245 | self_ = self; |
17246 | } |
17247 | |
17248 | at::Tensor grad_input_; |
17249 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
17250 | at::functionalization::impl::sync(grad_input); |
17251 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
17252 | } else { |
17253 | grad_input_ = grad_input; |
17254 | } |
17255 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
17256 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) { |
17257 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17258 | TORCH_INTERNAL_ASSERT(false, |
17259 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
17260 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
17261 | } else { |
17262 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17263 | at::AutoDispatchSkipFunctionalize guard; |
17264 | at::Tensor tmp_output = at::_ops::softplus_backward_grad_input::call(grad_output_, self_, beta, threshold, grad_input_); |
17265 | return grad_input; |
17266 | } |
17267 | } else { |
17268 | at::Tensor tmp_output; |
17269 | { |
17270 | at::AutoDispatchSkipFunctionalize guard; |
17271 | tmp_output = at::_ops::softplus_backward::call(grad_output_, self_, beta, threshold); |
17272 | } |
17273 | at::functionalization::impl::replace_(grad_input, tmp_output); |
17274 | at::functionalization::impl::commit_update(grad_input); |
17275 | at::functionalization::impl::sync(grad_input); |
17276 | return grad_input; |
17277 | } |
17278 | } |
17279 | |
17280 | at::Tensor & mkldnn_adaptive_avg_pool2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) { |
17281 | if (false) { |
17282 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17283 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17284 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17285 | auto self_meta = to_meta(self); |
17286 | auto out_meta = to_meta(out); |
17287 | at::AutoDispatchSkipFunctionalize func_guard; |
17288 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17289 | at::_ops::mkldnn_adaptive_avg_pool2d_out::call(self_meta, output_size, out_meta); |
17290 | } |
17291 | |
17292 | at::Tensor self_; |
17293 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17294 | at::functionalization::impl::sync(self); |
17295 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17296 | } else { |
17297 | self_ = self; |
17298 | } |
17299 | |
17300 | at::Tensor out_; |
17301 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
17302 | at::functionalization::impl::sync(out); |
17303 | out_ = at::functionalization::impl::from_functional_tensor(out); |
17304 | } else { |
17305 | out_ = out; |
17306 | } |
17307 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
17308 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
17309 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17310 | TORCH_INTERNAL_ASSERT(false, |
17311 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
17312 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
17313 | } else { |
17314 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17315 | at::AutoDispatchSkipFunctionalize guard; |
17316 | at::Tensor tmp_output = at::_ops::mkldnn_adaptive_avg_pool2d_out::call(self_, output_size, out_); |
17317 | return out; |
17318 | } |
17319 | } else { |
17320 | at::Tensor tmp_output; |
17321 | { |
17322 | at::AutoDispatchSkipFunctionalize guard; |
17323 | tmp_output = at::_ops::mkldnn_adaptive_avg_pool2d::call(self_, output_size); |
17324 | } |
17325 | at::functionalization::impl::replace_(out, tmp_output); |
17326 | at::functionalization::impl::commit_update(out); |
17327 | at::functionalization::impl::sync(out); |
17328 | return out; |
17329 | } |
17330 | } |
17331 | |
17332 | ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices) { |
17333 | if (false) { |
17334 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17335 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17336 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17337 | auto self_meta = to_meta(self); |
17338 | auto out_meta = to_meta(out); |
17339 | auto indices_meta = to_meta(indices); |
17340 | at::AutoDispatchSkipFunctionalize func_guard; |
17341 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17342 | at::_ops::adaptive_max_pool2d_out::call(self_meta, output_size, out_meta, indices_meta); |
17343 | } |
17344 | |
17345 | at::Tensor self_; |
17346 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17347 | at::functionalization::impl::sync(self); |
17348 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17349 | } else { |
17350 | self_ = self; |
17351 | } |
17352 | |
17353 | at::Tensor out_; |
17354 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
17355 | at::functionalization::impl::sync(out); |
17356 | out_ = at::functionalization::impl::from_functional_tensor(out); |
17357 | } else { |
17358 | out_ = out; |
17359 | } |
17360 | |
17361 | at::Tensor indices_; |
17362 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
17363 | at::functionalization::impl::sync(indices); |
17364 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
17365 | } else { |
17366 | indices_ = indices; |
17367 | } |
17368 | if (!(true && at::functionalization::impl::isFunctionalTensor(out) && at::functionalization::impl::isFunctionalTensor(indices))) { |
17369 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
17370 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17371 | TORCH_INTERNAL_ASSERT(false, |
17372 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
17373 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
17374 | } else { |
17375 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17376 | at::AutoDispatchSkipFunctionalize guard; |
17377 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::adaptive_max_pool2d_out::call(self_, output_size, out_, indices_); |
17378 | return ::std::tuple<at::Tensor &,at::Tensor &>(out, indices); |
17379 | } |
17380 | } else { |
17381 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
17382 | { |
17383 | at::AutoDispatchSkipFunctionalize guard; |
17384 | tmp_output = at::_ops::adaptive_max_pool2d::call(self_, output_size); |
17385 | } |
17386 | at::functionalization::impl::replace_(out, std::get<0>(tmp_output)); |
17387 | at::functionalization::impl::commit_update(out); |
17388 | at::functionalization::impl::sync(out); |
17389 | at::functionalization::impl::replace_(indices, std::get<1>(tmp_output)); |
17390 | at::functionalization::impl::commit_update(indices); |
17391 | at::functionalization::impl::sync(indices); |
17392 | return ::std::tuple<at::Tensor &,at::Tensor &>(out, indices); |
17393 | } |
17394 | } |
17395 | |
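// A note on the odd-looking conditions in these kernels: expressions like
// `!(true && isFunctionalTensor(out) && isFunctionalTensor(indices))` and
// `(false || isFunctionalTensor(self))` appear to come from the code
// generator folding one check per argument onto a neutral seed (`true` for
// &&-chains, `false` for ||-chains), which keeps the emitter simple at the
// cost of redundant constants.
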
17396 | ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices) { |
17397 | if (false) { |
17398 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17399 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17400 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17401 | auto self_meta = to_meta(self); |
17402 | auto out_meta = to_meta(out); |
17403 | auto indices_meta = to_meta(indices); |
17404 | at::AutoDispatchSkipFunctionalize func_guard; |
17405 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17406 | at::_ops::adaptive_max_pool3d_out::call(self_meta, output_size, out_meta, indices_meta); |
17407 | } |
17408 | |
17409 | at::Tensor self_; |
17410 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17411 | at::functionalization::impl::sync(self); |
17412 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17413 | } else { |
17414 | self_ = self; |
17415 | } |
17416 | |
17417 | at::Tensor out_; |
17418 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
17419 | at::functionalization::impl::sync(out); |
17420 | out_ = at::functionalization::impl::from_functional_tensor(out); |
17421 | } else { |
17422 | out_ = out; |
17423 | } |
17424 | |
17425 | at::Tensor indices_; |
17426 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
17427 | at::functionalization::impl::sync(indices); |
17428 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
17429 | } else { |
17430 | indices_ = indices; |
17431 | } |
17432 | if (!(true && at::functionalization::impl::isFunctionalTensor(out) && at::functionalization::impl::isFunctionalTensor(indices))) { |
17433 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
17434 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17435 | TORCH_INTERNAL_ASSERT(false, |
17436 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
17437 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
17438 | } else { |
17439 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17440 | at::AutoDispatchSkipFunctionalize guard; |
17441 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::adaptive_max_pool3d_out::call(self_, output_size, out_, indices_); |
17442 | return ::std::tuple<at::Tensor &,at::Tensor &>(out, indices); |
17443 | } |
17444 | } else { |
17445 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
17446 | { |
17447 | at::AutoDispatchSkipFunctionalize guard; |
17448 | tmp_output = at::_ops::adaptive_max_pool3d::call(self_, output_size); |
17449 | } |
17450 | at::functionalization::impl::replace_(out, std::get<0>(tmp_output)); |
17451 | at::functionalization::impl::commit_update(out); |
17452 | at::functionalization::impl::sync(out); |
17453 | at::functionalization::impl::replace_(indices, std::get<1>(tmp_output)); |
17454 | at::functionalization::impl::commit_update(indices); |
17455 | at::functionalization::impl::sync(indices); |
17456 | return ::std::tuple<at::Tensor &,at::Tensor &>(out, indices); |
17457 | } |
17458 | } |
17459 | |
17460 | at::Tensor & avg_pool2d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & grad_input) { |
17461 | if (false) { |
17462 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17463 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17464 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17465 | auto grad_output_meta = to_meta(grad_output); |
17466 | auto self_meta = to_meta(self); |
17467 | auto grad_input_meta = to_meta(grad_input); |
17468 | at::AutoDispatchSkipFunctionalize func_guard; |
17469 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17470 | at::_ops::avg_pool2d_backward_grad_input::call(grad_output_meta, self_meta, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input_meta); |
17471 | } |
17472 | |
17473 | at::Tensor grad_output_; |
17474 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
17475 | at::functionalization::impl::sync(grad_output); |
17476 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
17477 | } else { |
17478 | grad_output_ = grad_output; |
17479 | } |
17480 | |
17481 | at::Tensor self_; |
17482 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17483 | at::functionalization::impl::sync(self); |
17484 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17485 | } else { |
17486 | self_ = self; |
17487 | } |
17488 | |
17489 | at::Tensor grad_input_; |
17490 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
17491 | at::functionalization::impl::sync(grad_input); |
17492 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
17493 | } else { |
17494 | grad_input_ = grad_input; |
17495 | } |
17496 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
17497 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) { |
17498 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17499 | TORCH_INTERNAL_ASSERT(false, |
17500 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
17501 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
17502 | } else { |
17503 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17504 | at::AutoDispatchSkipFunctionalize guard; |
17505 | at::Tensor tmp_output = at::_ops::avg_pool2d_backward_grad_input::call(grad_output_, self_, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input_); |
17506 | return grad_input; |
17507 | } |
17508 | } else { |
17509 | at::Tensor tmp_output; |
17510 | { |
17511 | at::AutoDispatchSkipFunctionalize guard; |
17512 | tmp_output = at::_ops::avg_pool2d_backward::call(grad_output_, self_, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); |
17513 | } |
17514 | at::functionalization::impl::replace_(grad_input, tmp_output); |
17515 | at::functionalization::impl::commit_update(grad_input); |
17516 | at::functionalization::impl::sync(grad_input); |
17517 | return grad_input; |
17518 | } |
17519 | } |
17520 | |
17521 | ::std::tuple<at::Tensor &,at::Tensor &> max_pool2d_with_indices_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices) { |
17522 | if (false) { |
17523 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17524 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17525 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17526 | auto self_meta = to_meta(self); |
17527 | auto out_meta = to_meta(out); |
17528 | auto indices_meta = to_meta(indices); |
17529 | at::AutoDispatchSkipFunctionalize func_guard; |
17530 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17531 | at::_ops::max_pool2d_with_indices_out::call(self_meta, kernel_size, stride, padding, dilation, ceil_mode, out_meta, indices_meta); |
17532 | } |
17533 | |
17534 | at::Tensor self_; |
17535 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17536 | at::functionalization::impl::sync(self); |
17537 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17538 | } else { |
17539 | self_ = self; |
17540 | } |
17541 | |
17542 | at::Tensor out_; |
17543 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
17544 | at::functionalization::impl::sync(out); |
17545 | out_ = at::functionalization::impl::from_functional_tensor(out); |
17546 | } else { |
17547 | out_ = out; |
17548 | } |
17549 | |
17550 | at::Tensor indices_; |
17551 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
17552 | at::functionalization::impl::sync(indices); |
17553 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
17554 | } else { |
17555 | indices_ = indices; |
17556 | } |
17557 | if (!(true && at::functionalization::impl::isFunctionalTensor(out) && at::functionalization::impl::isFunctionalTensor(indices))) { |
17558 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
17559 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17560 | TORCH_INTERNAL_ASSERT(false, |
17561 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
17562 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
17563 | } else { |
17564 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17565 | at::AutoDispatchSkipFunctionalize guard; |
17566 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::max_pool2d_with_indices_out::call(self_, kernel_size, stride, padding, dilation, ceil_mode, out_, indices_); |
17567 | return ::std::tuple<at::Tensor &,at::Tensor &>(out, indices); |
17568 | } |
17569 | } else { |
17570 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
17571 | { |
17572 | at::AutoDispatchSkipFunctionalize guard; |
17573 | tmp_output = at::_ops::max_pool2d_with_indices::call(self_, kernel_size, stride, padding, dilation, ceil_mode); |
17574 | } |
17575 | at::functionalization::impl::replace_(out, std::get<0>(tmp_output)); |
17576 | at::functionalization::impl::commit_update(out); |
17577 | at::functionalization::impl::sync(out); |
17578 | at::functionalization::impl::replace_(indices, std::get<1>(tmp_output)); |
17579 | at::functionalization::impl::commit_update(indices); |
17580 | at::functionalization::impl::sync(indices); |
17581 | return ::std::tuple<at::Tensor &,at::Tensor &>(out, indices); |
17582 | } |
17583 | } |
17584 | |
17585 | at::Tensor & upsample_linear1d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales, at::Tensor & out) { |
17586 | if (false) { |
17587 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17588 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17589 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17590 | auto self_meta = to_meta(self); |
17591 | auto out_meta = to_meta(out); |
17592 | at::AutoDispatchSkipFunctionalize func_guard; |
17593 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17594 | at::_ops::upsample_linear1d_out::call(self_meta, output_size, align_corners, scales, out_meta); |
17595 | } |
17596 | |
17597 | at::Tensor self_; |
17598 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17599 | at::functionalization::impl::sync(self); |
17600 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17601 | } else { |
17602 | self_ = self; |
17603 | } |
17604 | |
17605 | at::Tensor out_; |
17606 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
17607 | at::functionalization::impl::sync(out); |
17608 | out_ = at::functionalization::impl::from_functional_tensor(out); |
17609 | } else { |
17610 | out_ = out; |
17611 | } |
17612 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
17613 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
17614 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17615 | TORCH_INTERNAL_ASSERT(false, |
17616 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
17617 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
17618 | } else { |
17619 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17620 | at::AutoDispatchSkipFunctionalize guard; |
17621 | at::Tensor tmp_output = at::_ops::upsample_linear1d_out::call(self_, output_size, align_corners, scales, out_); |
17622 | return out; |
17623 | } |
17624 | } else { |
17625 | at::Tensor tmp_output; |
17626 | { |
17627 | at::AutoDispatchSkipFunctionalize guard; |
17628 | tmp_output = at::_ops::upsample_linear1d::call(self_, output_size, align_corners, scales); |
17629 | } |
17630 | at::functionalization::impl::replace_(out, tmp_output); |
17631 | at::functionalization::impl::commit_update(out); |
17632 | at::functionalization::impl::sync(out); |
17633 | return out; |
17634 | } |
17635 | } |
17636 | |
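// Only Tensor arguments participate in the sync/unwrap dance above: non-tensor
// arguments such as the SymInt `output_size`, `align_corners`, and the
// optional `scales` are forwarded to both the out= and the functional variants
// completely unchanged.
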
17637 | at::Tensor & upsample_linear1d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales, at::Tensor & grad_input) { |
17638 | if (false) { |
17639 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17640 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17641 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17642 | auto grad_output_meta = to_meta(grad_output); |
17643 | auto grad_input_meta = to_meta(grad_input); |
17644 | at::AutoDispatchSkipFunctionalize func_guard; |
17645 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17646 | at::_ops::upsample_linear1d_backward_grad_input::call(grad_output_meta, output_size, input_size, align_corners, scales, grad_input_meta); |
17647 | } |
17648 | |
17649 | at::Tensor grad_output_; |
17650 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
17651 | at::functionalization::impl::sync(grad_output); |
17652 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
17653 | } else { |
17654 | grad_output_ = grad_output; |
17655 | } |
17656 | |
17657 | at::Tensor grad_input_; |
17658 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
17659 | at::functionalization::impl::sync(grad_input); |
17660 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
17661 | } else { |
17662 | grad_input_ = grad_input; |
17663 | } |
17664 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
17665 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output))) { |
17666 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17667 | TORCH_INTERNAL_ASSERT(false, |
17668 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
17669 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
17670 | } else { |
17671 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17672 | at::AutoDispatchSkipFunctionalize guard; |
17673 | at::Tensor tmp_output = at::_ops::upsample_linear1d_backward_grad_input::call(grad_output_, output_size, input_size, align_corners, scales, grad_input_); |
17674 | return grad_input; |
17675 | } |
17676 | } else { |
17677 | at::Tensor tmp_output; |
17678 | { |
17679 | at::AutoDispatchSkipFunctionalize guard; |
17680 | tmp_output = at::_ops::upsample_linear1d_backward::call(grad_output_, output_size, input_size, align_corners, scales); |
17681 | } |
17682 | at::functionalization::impl::replace_(grad_input, tmp_output); |
17683 | at::functionalization::impl::commit_update(grad_input); |
17684 | at::functionalization::impl::sync(grad_input); |
17685 | return grad_input; |
17686 | } |
17687 | } |
17688 | |
17689 | at::Tensor & upsample_bicubic2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) { |
17690 | if (false) { |
17691 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17692 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17693 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17694 | auto self_meta = to_meta(self); |
17695 | auto out_meta = to_meta(out); |
17696 | at::AutoDispatchSkipFunctionalize func_guard; |
17697 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17698 | at::_ops::upsample_bicubic2d_out::call(self_meta, output_size, align_corners, scales_h, scales_w, out_meta); |
17699 | } |
17700 | |
17701 | at::Tensor self_; |
17702 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17703 | at::functionalization::impl::sync(self); |
17704 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17705 | } else { |
17706 | self_ = self; |
17707 | } |
17708 | |
17709 | at::Tensor out_; |
17710 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
17711 | at::functionalization::impl::sync(out); |
17712 | out_ = at::functionalization::impl::from_functional_tensor(out); |
17713 | } else { |
17714 | out_ = out; |
17715 | } |
17716 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
17717 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
17718 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17719 | TORCH_INTERNAL_ASSERT(false, |
17720 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
17721 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
17722 | } else { |
17723 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17724 | at::AutoDispatchSkipFunctionalize guard; |
17725 | at::Tensor tmp_output = at::_ops::upsample_bicubic2d_out::call(self_, output_size, align_corners, scales_h, scales_w, out_); |
17726 | return out; |
17727 | } |
17728 | } else { |
17729 | at::Tensor tmp_output; |
17730 | { |
17731 | at::AutoDispatchSkipFunctionalize guard; |
17732 | tmp_output = at::_ops::upsample_bicubic2d::call(self_, output_size, align_corners, scales_h, scales_w); |
17733 | } |
17734 | at::functionalization::impl::replace_(out, tmp_output); |
17735 | at::functionalization::impl::commit_update(out); |
17736 | at::functionalization::impl::sync(out); |
17737 | return out; |
17738 | } |
17739 | } |
17740 | |
17741 | at::Tensor & upsample_bicubic2d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) { |
17742 | if (false) { |
17743 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17744 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17745 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17746 | auto grad_output_meta = to_meta(grad_output); |
17747 | auto grad_input_meta = to_meta(grad_input); |
17748 | at::AutoDispatchSkipFunctionalize func_guard; |
17749 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17750 | at::_ops::upsample_bicubic2d_backward_grad_input::call(grad_output_meta, output_size, input_size, align_corners, scales_h, scales_w, grad_input_meta); |
17751 | } |
17752 | |
17753 | at::Tensor grad_output_; |
17754 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
17755 | at::functionalization::impl::sync(grad_output); |
17756 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
17757 | } else { |
17758 | grad_output_ = grad_output; |
17759 | } |
17760 | |
17761 | at::Tensor grad_input_; |
17762 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
17763 | at::functionalization::impl::sync(grad_input); |
17764 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
17765 | } else { |
17766 | grad_input_ = grad_input; |
17767 | } |
17768 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
17769 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output))) { |
17770 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17771 | TORCH_INTERNAL_ASSERT(false, |
17772 | "mutating a non-functional tensor with a functional tensor is not allowed.", |
17773 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call."); |
17774 | } else { |
17775 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17776 | at::AutoDispatchSkipFunctionalize guard; |
17777 | at::Tensor tmp_output = at::_ops::upsample_bicubic2d_backward_grad_input::call(grad_output_, output_size, input_size, align_corners, scales_h, scales_w, grad_input_); |
17778 | return grad_input; |
17779 | } |
17780 | } else { |
17781 | at::Tensor tmp_output; |
17782 | { |
17783 | at::AutoDispatchSkipFunctionalize guard; |
17784 | tmp_output = at::_ops::upsample_bicubic2d_backward::call(grad_output_, output_size, input_size, align_corners, scales_h, scales_w); |
17785 | } |
17786 | at::functionalization::impl::replace_(grad_input, tmp_output); |
17787 | at::functionalization::impl::commit_update(grad_input); |
17788 | at::functionalization::impl::sync(grad_input); |
17789 | return grad_input; |
17790 | } |
17791 | } |
17792 | |
17793 | at::Tensor & _upsample_bicubic2d_aa_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) { |
17794 | if (false) { |
17795 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17796 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
17798 | auto grad_output_meta = to_meta(grad_output); |
17799 | auto grad_input_meta = to_meta(grad_input); |
17800 | at::AutoDispatchSkipFunctionalize func_guard; |
17801 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17802 | at::_ops::_upsample_bicubic2d_aa_backward_grad_input::call(grad_output_meta, output_size, input_size, align_corners, scales_h, scales_w, grad_input_meta); |
17803 | } |
17804 | |
17805 | at::Tensor grad_output_; |
17806 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
17807 | at::functionalization::impl::sync(grad_output); |
17808 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
17809 | } else { |
17810 | grad_output_ = grad_output; |
17811 | } |
17812 | |
17813 | at::Tensor grad_input_; |
17814 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
17815 | at::functionalization::impl::sync(grad_input); |
17816 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
17817 | } else { |
17818 | grad_input_ = grad_input; |
17819 | } |
17820 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
17821 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
  "mutating a non-functional tensor with a functional tensor is not allowed.",
  " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
17826 | } else { |
17827 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17828 | at::AutoDispatchSkipFunctionalize guard; |
17829 | at::Tensor tmp_output = at::_ops::_upsample_bicubic2d_aa_backward_grad_input::call(grad_output_, output_size, input_size, align_corners, scales_h, scales_w, grad_input_); |
return grad_input;
17831 | } |
17832 | } else { |
17833 | at::Tensor tmp_output; |
17834 | { |
17835 | at::AutoDispatchSkipFunctionalize guard; |
17836 | tmp_output = at::_ops::_upsample_bicubic2d_aa_backward::call(grad_output_, output_size, input_size, align_corners, scales_h, scales_w); |
17837 | } |
17838 | at::functionalization::impl::replace_(grad_input, tmp_output); |
17839 | at::functionalization::impl::commit_update(grad_input); |
17840 | at::functionalization::impl::sync(grad_input); |
17841 | return grad_input; |
17842 | } |
17843 | } |
17844 | |
17845 | at::Tensor & upsample_nearest1d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales, at::Tensor & out) { |
17846 | if (false) { |
17847 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17848 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
17850 | auto self_meta = to_meta(self); |
17851 | auto out_meta = to_meta(out); |
17852 | at::AutoDispatchSkipFunctionalize func_guard; |
17853 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17854 | at::_ops::upsample_nearest1d_out::call(self_meta, output_size, scales, out_meta); |
17855 | } |
17856 | |
17857 | at::Tensor self_; |
17858 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17859 | at::functionalization::impl::sync(self); |
17860 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17861 | } else { |
17862 | self_ = self; |
17863 | } |
17864 | |
17865 | at::Tensor out_; |
17866 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
17867 | at::functionalization::impl::sync(out); |
17868 | out_ = at::functionalization::impl::from_functional_tensor(out); |
17869 | } else { |
17870 | out_ = out; |
17871 | } |
17872 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
17873 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
  "mutating a non-functional tensor with a functional tensor is not allowed.",
  " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
17878 | } else { |
17879 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17880 | at::AutoDispatchSkipFunctionalize guard; |
17881 | at::Tensor tmp_output = at::_ops::upsample_nearest1d_out::call(self_, output_size, scales, out_); |
return out;
17883 | } |
17884 | } else { |
17885 | at::Tensor tmp_output; |
17886 | { |
17887 | at::AutoDispatchSkipFunctionalize guard; |
17888 | tmp_output = at::_ops::upsample_nearest1d::call(self_, output_size, scales); |
17889 | } |
17890 | at::functionalization::impl::replace_(out, tmp_output); |
17891 | at::functionalization::impl::commit_update(out); |
17892 | at::functionalization::impl::sync(out); |
17893 | return out; |
17894 | } |
17895 | } |
17896 | |
17897 | at::Tensor & _upsample_nearest_exact1d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales, at::Tensor & out) { |
17898 | if (false) { |
17899 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17900 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
17902 | auto self_meta = to_meta(self); |
17903 | auto out_meta = to_meta(out); |
17904 | at::AutoDispatchSkipFunctionalize func_guard; |
17905 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17906 | at::_ops::_upsample_nearest_exact1d_out::call(self_meta, output_size, scales, out_meta); |
17907 | } |
17908 | |
17909 | at::Tensor self_; |
17910 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17911 | at::functionalization::impl::sync(self); |
17912 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17913 | } else { |
17914 | self_ = self; |
17915 | } |
17916 | |
17917 | at::Tensor out_; |
17918 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
17919 | at::functionalization::impl::sync(out); |
17920 | out_ = at::functionalization::impl::from_functional_tensor(out); |
17921 | } else { |
17922 | out_ = out; |
17923 | } |
17924 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
17925 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
  "mutating a non-functional tensor with a functional tensor is not allowed.",
  " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
17930 | } else { |
17931 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17932 | at::AutoDispatchSkipFunctionalize guard; |
17933 | at::Tensor tmp_output = at::_ops::_upsample_nearest_exact1d_out::call(self_, output_size, scales, out_); |
return out;
17935 | } |
17936 | } else { |
17937 | at::Tensor tmp_output; |
17938 | { |
17939 | at::AutoDispatchSkipFunctionalize guard; |
17940 | tmp_output = at::_ops::_upsample_nearest_exact1d::call(self_, output_size, scales); |
17941 | } |
17942 | at::functionalization::impl::replace_(out, tmp_output); |
17943 | at::functionalization::impl::commit_update(out); |
17944 | at::functionalization::impl::sync(out); |
17945 | return out; |
17946 | } |
17947 | } |
17948 | |
17949 | at::Tensor & upsample_nearest1d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input) { |
17950 | if (false) { |
17951 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17952 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
17954 | auto grad_output_meta = to_meta(grad_output); |
17955 | auto grad_input_meta = to_meta(grad_input); |
17956 | at::AutoDispatchSkipFunctionalize func_guard; |
17957 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17958 | at::_ops::upsample_nearest1d_backward_grad_input::call(grad_output_meta, output_size, input_size, scales, grad_input_meta); |
17959 | } |
17960 | |
17961 | at::Tensor grad_output_; |
17962 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
17963 | at::functionalization::impl::sync(grad_output); |
17964 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
17965 | } else { |
17966 | grad_output_ = grad_output; |
17967 | } |
17968 | |
17969 | at::Tensor grad_input_; |
17970 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
17971 | at::functionalization::impl::sync(grad_input); |
17972 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
17973 | } else { |
17974 | grad_input_ = grad_input; |
17975 | } |
17976 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
17977 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
  "mutating a non-functional tensor with a functional tensor is not allowed.",
  " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
17982 | } else { |
17983 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17984 | at::AutoDispatchSkipFunctionalize guard; |
17985 | at::Tensor tmp_output = at::_ops::upsample_nearest1d_backward_grad_input::call(grad_output_, output_size, input_size, scales, grad_input_); |
return grad_input;
17987 | } |
17988 | } else { |
17989 | at::Tensor tmp_output; |
17990 | { |
17991 | at::AutoDispatchSkipFunctionalize guard; |
17992 | tmp_output = at::_ops::upsample_nearest1d_backward::call(grad_output_, output_size, input_size, scales); |
17993 | } |
17994 | at::functionalization::impl::replace_(grad_input, tmp_output); |
17995 | at::functionalization::impl::commit_update(grad_input); |
17996 | at::functionalization::impl::sync(grad_input); |
17997 | return grad_input; |
17998 | } |
17999 | } |
18000 | |
18001 | at::Tensor & upsample_nearest3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) { |
18002 | if (false) { |
18003 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18004 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
18006 | auto self_meta = to_meta(self); |
18007 | auto out_meta = to_meta(out); |
18008 | at::AutoDispatchSkipFunctionalize func_guard; |
18009 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18010 | at::_ops::upsample_nearest3d_out::call(self_meta, output_size, scales_d, scales_h, scales_w, out_meta); |
18011 | } |
18012 | |
18013 | at::Tensor self_; |
18014 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18015 | at::functionalization::impl::sync(self); |
18016 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18017 | } else { |
18018 | self_ = self; |
18019 | } |
18020 | |
18021 | at::Tensor out_; |
18022 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18023 | at::functionalization::impl::sync(out); |
18024 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18025 | } else { |
18026 | out_ = out; |
18027 | } |
18028 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18029 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
  "mutating a non-functional tensor with a functional tensor is not allowed.",
  " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18034 | } else { |
18035 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18036 | at::AutoDispatchSkipFunctionalize guard; |
18037 | at::Tensor tmp_output = at::_ops::upsample_nearest3d_out::call(self_, output_size, scales_d, scales_h, scales_w, out_); |
return out;
18039 | } |
18040 | } else { |
18041 | at::Tensor tmp_output; |
18042 | { |
18043 | at::AutoDispatchSkipFunctionalize guard; |
18044 | tmp_output = at::_ops::upsample_nearest3d::call(self_, output_size, scales_d, scales_h, scales_w); |
18045 | } |
18046 | at::functionalization::impl::replace_(out, tmp_output); |
18047 | at::functionalization::impl::commit_update(out); |
18048 | at::functionalization::impl::sync(out); |
18049 | return out; |
18050 | } |
18051 | } |
18052 | |
18053 | at::Tensor & slow_conv_transpose3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out) { |
18054 | if (false) { |
18055 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18056 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
18058 | auto self_meta = to_meta(self); |
18059 | auto weight_meta = to_meta(weight); |
18060 | auto bias_meta = to_meta(bias); |
18061 | auto out_meta = to_meta(out); |
18062 | at::AutoDispatchSkipFunctionalize func_guard; |
18063 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18064 | at::_ops::slow_conv_transpose3d_out::call(self_meta, weight_meta, kernel_size, bias_meta, stride, padding, output_padding, dilation, out_meta); |
18065 | } |
18066 | |
18067 | at::Tensor self_; |
18068 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18069 | at::functionalization::impl::sync(self); |
18070 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18071 | } else { |
18072 | self_ = self; |
18073 | } |
18074 | |
18075 | at::Tensor weight_; |
18076 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
18077 | at::functionalization::impl::sync(weight); |
18078 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
18079 | } else { |
18080 | weight_ = weight; |
18081 | } |
18082 | |
18083 | c10::optional<at::Tensor> bias_; |
18084 | if (at::functionalization::impl::isFunctionalTensor(bias)) { |
18085 | at::functionalization::impl::sync(bias); |
18086 | bias_ = at::functionalization::impl::from_functional_tensor(bias); |
18087 | } else { |
18088 | bias_ = bias; |
18089 | } |
18090 | |
18091 | at::Tensor out_; |
18092 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18093 | at::functionalization::impl::sync(out); |
18094 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18095 | } else { |
18096 | out_ = out; |
18097 | } |
18098 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18099 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
  "mutating a non-functional tensor with a functional tensor is not allowed.",
  " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18104 | } else { |
18105 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18106 | at::AutoDispatchSkipFunctionalize guard; |
18107 | at::Tensor tmp_output = at::_ops::slow_conv_transpose3d_out::call(self_, weight_, kernel_size, bias_, stride, padding, output_padding, dilation, out_); |
return out;
18109 | } |
18110 | } else { |
18111 | at::Tensor tmp_output; |
18112 | { |
18113 | at::AutoDispatchSkipFunctionalize guard; |
18114 | tmp_output = at::_ops::slow_conv_transpose3d::call(self_, weight_, kernel_size, bias_, stride, padding, output_padding, dilation); |
18115 | } |
18116 | at::functionalization::impl::replace_(out, tmp_output); |
18117 | at::functionalization::impl::commit_update(out); |
18118 | at::functionalization::impl::sync(out); |
18119 | return out; |
18120 | } |
18121 | } |
18122 | |
18123 | at::Tensor & isposinf_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
18124 | if (false) { |
18125 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18126 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
18128 | auto self_meta = to_meta(self); |
18129 | auto out_meta = to_meta(out); |
18130 | at::AutoDispatchSkipFunctionalize func_guard; |
18131 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18132 | at::_ops::isposinf_out::call(self_meta, out_meta); |
18133 | } |
18134 | |
18135 | at::Tensor self_; |
18136 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18137 | at::functionalization::impl::sync(self); |
18138 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18139 | } else { |
18140 | self_ = self; |
18141 | } |
18142 | |
18143 | at::Tensor out_; |
18144 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18145 | at::functionalization::impl::sync(out); |
18146 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18147 | } else { |
18148 | out_ = out; |
18149 | } |
18150 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18151 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
  "mutating a non-functional tensor with a functional tensor is not allowed.",
  " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18156 | } else { |
18157 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18158 | at::AutoDispatchSkipFunctionalize guard; |
18159 | at::Tensor tmp_output = at::_ops::isposinf_out::call(self_, out_); |
return out;
18161 | } |
18162 | } else { |
18163 | at::Tensor tmp_output; |
18164 | { |
18165 | at::AutoDispatchSkipFunctionalize guard; |
18166 | tmp_output = at::_ops::isposinf::call(self_); |
18167 | } |
18168 | at::functionalization::impl::replace_(out, tmp_output); |
18169 | at::functionalization::impl::commit_update(out); |
18170 | at::functionalization::impl::sync(out); |
18171 | return out; |
18172 | } |
18173 | } |
18174 | |
18175 | at::Tensor & special_entr_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
18176 | if (false) { |
18177 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18178 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
18180 | auto self_meta = to_meta(self); |
18181 | auto out_meta = to_meta(out); |
18182 | at::AutoDispatchSkipFunctionalize func_guard; |
18183 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18184 | at::_ops::special_entr_out::call(self_meta, out_meta); |
18185 | } |
18186 | |
18187 | at::Tensor self_; |
18188 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18189 | at::functionalization::impl::sync(self); |
18190 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18191 | } else { |
18192 | self_ = self; |
18193 | } |
18194 | |
18195 | at::Tensor out_; |
18196 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18197 | at::functionalization::impl::sync(out); |
18198 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18199 | } else { |
18200 | out_ = out; |
18201 | } |
18202 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18203 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
  "mutating a non-functional tensor with a functional tensor is not allowed.",
  " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18208 | } else { |
18209 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18210 | at::AutoDispatchSkipFunctionalize guard; |
18211 | at::Tensor tmp_output = at::_ops::special_entr_out::call(self_, out_); |
return out;
18213 | } |
18214 | } else { |
18215 | at::Tensor tmp_output; |
18216 | { |
18217 | at::AutoDispatchSkipFunctionalize guard; |
18218 | tmp_output = at::_ops::special_entr::call(self_); |
18219 | } |
18220 | at::functionalization::impl::replace_(out, tmp_output); |
18221 | at::functionalization::impl::commit_update(out); |
18222 | at::functionalization::impl::sync(out); |
18223 | return out; |
18224 | } |
18225 | } |
18226 | |
18227 | at::Tensor & special_psi_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
18228 | if (false) { |
18229 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18230 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
18232 | auto self_meta = to_meta(self); |
18233 | auto out_meta = to_meta(out); |
18234 | at::AutoDispatchSkipFunctionalize func_guard; |
18235 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18236 | at::_ops::special_psi_out::call(self_meta, out_meta); |
18237 | } |
18238 | |
18239 | at::Tensor self_; |
18240 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18241 | at::functionalization::impl::sync(self); |
18242 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18243 | } else { |
18244 | self_ = self; |
18245 | } |
18246 | |
18247 | at::Tensor out_; |
18248 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18249 | at::functionalization::impl::sync(out); |
18250 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18251 | } else { |
18252 | out_ = out; |
18253 | } |
18254 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18255 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
  "mutating a non-functional tensor with a functional tensor is not allowed.",
  " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18260 | } else { |
18261 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18262 | at::AutoDispatchSkipFunctionalize guard; |
18263 | at::Tensor tmp_output = at::_ops::special_psi_out::call(self_, out_); |
return out;
18265 | } |
18266 | } else { |
18267 | at::Tensor tmp_output; |
18268 | { |
18269 | at::AutoDispatchSkipFunctionalize guard; |
18270 | tmp_output = at::_ops::special_psi::call(self_); |
18271 | } |
18272 | at::functionalization::impl::replace_(out, tmp_output); |
18273 | at::functionalization::impl::commit_update(out); |
18274 | at::functionalization::impl::sync(out); |
18275 | return out; |
18276 | } |
18277 | } |
18278 | |
18279 | at::Tensor & special_erfinv_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
18280 | if (false) { |
18281 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18282 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
18284 | auto self_meta = to_meta(self); |
18285 | auto out_meta = to_meta(out); |
18286 | at::AutoDispatchSkipFunctionalize func_guard; |
18287 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18288 | at::_ops::special_erfinv_out::call(self_meta, out_meta); |
18289 | } |
18290 | |
18291 | at::Tensor self_; |
18292 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18293 | at::functionalization::impl::sync(self); |
18294 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18295 | } else { |
18296 | self_ = self; |
18297 | } |
18298 | |
18299 | at::Tensor out_; |
18300 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18301 | at::functionalization::impl::sync(out); |
18302 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18303 | } else { |
18304 | out_ = out; |
18305 | } |
18306 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18307 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
  "mutating a non-functional tensor with a functional tensor is not allowed.",
  " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18312 | } else { |
18313 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18314 | at::AutoDispatchSkipFunctionalize guard; |
18315 | at::Tensor tmp_output = at::_ops::special_erfinv_out::call(self_, out_); |
return out;
18317 | } |
18318 | } else { |
18319 | at::Tensor tmp_output; |
18320 | { |
18321 | at::AutoDispatchSkipFunctionalize guard; |
18322 | tmp_output = at::_ops::special_erfinv::call(self_); |
18323 | } |
18324 | at::functionalization::impl::replace_(out, tmp_output); |
18325 | at::functionalization::impl::commit_update(out); |
18326 | at::functionalization::impl::sync(out); |
18327 | return out; |
18328 | } |
18329 | } |
18330 | |
18331 | at::Tensor & special_ndtr_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
18332 | if (false) { |
18333 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18334 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
18336 | auto self_meta = to_meta(self); |
18337 | auto out_meta = to_meta(out); |
18338 | at::AutoDispatchSkipFunctionalize func_guard; |
18339 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18340 | at::_ops::special_ndtr_out::call(self_meta, out_meta); |
18341 | } |
18342 | |
18343 | at::Tensor self_; |
18344 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18345 | at::functionalization::impl::sync(self); |
18346 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18347 | } else { |
18348 | self_ = self; |
18349 | } |
18350 | |
18351 | at::Tensor out_; |
18352 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18353 | at::functionalization::impl::sync(out); |
18354 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18355 | } else { |
18356 | out_ = out; |
18357 | } |
18358 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18359 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
  "mutating a non-functional tensor with a functional tensor is not allowed.",
  " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18364 | } else { |
18365 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18366 | at::AutoDispatchSkipFunctionalize guard; |
18367 | at::Tensor tmp_output = at::_ops::special_ndtr_out::call(self_, out_); |
return out;
18369 | } |
18370 | } else { |
18371 | at::Tensor tmp_output; |
18372 | { |
18373 | at::AutoDispatchSkipFunctionalize guard; |
18374 | tmp_output = at::_ops::special_ndtr::call(self_); |
18375 | } |
18376 | at::functionalization::impl::replace_(out, tmp_output); |
18377 | at::functionalization::impl::commit_update(out); |
18378 | at::functionalization::impl::sync(out); |
18379 | return out; |
18380 | } |
18381 | } |
18382 | |
18383 | at::Tensor & special_xlogy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
18384 | if (false) { |
18385 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18386 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
18388 | auto self_meta = to_meta(self); |
18389 | auto other_meta = to_meta(other); |
18390 | auto out_meta = to_meta(out); |
18391 | at::AutoDispatchSkipFunctionalize func_guard; |
18392 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18393 | at::_ops::special_xlogy_out::call(self_meta, other_meta, out_meta); |
18394 | } |
18395 | |
18396 | at::Tensor self_; |
18397 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18398 | at::functionalization::impl::sync(self); |
18399 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18400 | } else { |
18401 | self_ = self; |
18402 | } |
18403 | |
18404 | at::Tensor other_; |
18405 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
18406 | at::functionalization::impl::sync(other); |
18407 | other_ = at::functionalization::impl::from_functional_tensor(other); |
18408 | } else { |
18409 | other_ = other; |
18410 | } |
18411 | |
18412 | at::Tensor out_; |
18413 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18414 | at::functionalization::impl::sync(out); |
18415 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18416 | } else { |
18417 | out_ = out; |
18418 | } |
18419 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18420 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
  "mutating a non-functional tensor with a functional tensor is not allowed.",
  " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18425 | } else { |
18426 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18427 | at::AutoDispatchSkipFunctionalize guard; |
18428 | at::Tensor tmp_output = at::_ops::special_xlogy_out::call(self_, other_, out_); |
return out;
18430 | } |
18431 | } else { |
18432 | at::Tensor tmp_output; |
18433 | { |
18434 | at::AutoDispatchSkipFunctionalize guard; |
18435 | tmp_output = at::_ops::special_xlogy::call(self_, other_); |
18436 | } |
18437 | at::functionalization::impl::replace_(out, tmp_output); |
18438 | at::functionalization::impl::commit_update(out); |
18439 | at::functionalization::impl::sync(out); |
18440 | return out; |
18441 | } |
18442 | } |
18443 | |
18444 | at::Tensor & special_xlogy_out_self_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) { |
18445 | if (false) { |
18446 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18447 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
18449 | auto other_meta = to_meta(other); |
18450 | auto out_meta = to_meta(out); |
18451 | at::AutoDispatchSkipFunctionalize func_guard; |
18452 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18453 | at::_ops::special_xlogy_self_scalar_out::call(self, other_meta, out_meta); |
18454 | } |
18455 | |
18456 | at::Tensor other_; |
18457 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
18458 | at::functionalization::impl::sync(other); |
18459 | other_ = at::functionalization::impl::from_functional_tensor(other); |
18460 | } else { |
18461 | other_ = other; |
18462 | } |
18463 | |
18464 | at::Tensor out_; |
18465 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18466 | at::functionalization::impl::sync(out); |
18467 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18468 | } else { |
18469 | out_ = out; |
18470 | } |
18471 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18472 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
  "mutating a non-functional tensor with a functional tensor is not allowed.",
  " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18477 | } else { |
18478 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18479 | at::AutoDispatchSkipFunctionalize guard; |
18480 | at::Tensor tmp_output = at::_ops::special_xlogy_self_scalar_out::call(self, other_, out_); |
return out;
18482 | } |
18483 | } else { |
18484 | at::Tensor tmp_output; |
18485 | { |
18486 | at::AutoDispatchSkipFunctionalize guard; |
18487 | tmp_output = at::_ops::special_xlogy_self_scalar::call(self, other_); |
18488 | } |
18489 | at::functionalization::impl::replace_(out, tmp_output); |
18490 | at::functionalization::impl::commit_update(out); |
18491 | at::functionalization::impl::sync(out); |
18492 | return out; |
18493 | } |
18494 | } |
18495 | |
18496 | at::Tensor & special_xlogy_out_other_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
18497 | if (false) { |
18498 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18499 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
18501 | auto self_meta = to_meta(self); |
18502 | auto out_meta = to_meta(out); |
18503 | at::AutoDispatchSkipFunctionalize func_guard; |
18504 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18505 | at::_ops::special_xlogy_other_scalar_out::call(self_meta, other, out_meta); |
18506 | } |
18507 | |
18508 | at::Tensor self_; |
18509 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18510 | at::functionalization::impl::sync(self); |
18511 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18512 | } else { |
18513 | self_ = self; |
18514 | } |
18515 | |
18516 | at::Tensor out_; |
18517 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18518 | at::functionalization::impl::sync(out); |
18519 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18520 | } else { |
18521 | out_ = out; |
18522 | } |
18523 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18524 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
  "mutating a non-functional tensor with a functional tensor is not allowed.",
  " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18529 | } else { |
18530 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18531 | at::AutoDispatchSkipFunctionalize guard; |
18532 | at::Tensor tmp_output = at::_ops::special_xlogy_other_scalar_out::call(self_, other, out_); |
return out;
18534 | } |
18535 | } else { |
18536 | at::Tensor tmp_output; |
18537 | { |
18538 | at::AutoDispatchSkipFunctionalize guard; |
18539 | tmp_output = at::_ops::special_xlogy_other_scalar::call(self_, other); |
18540 | } |
18541 | at::functionalization::impl::replace_(out, tmp_output); |
18542 | at::functionalization::impl::commit_update(out); |
18543 | at::functionalization::impl::sync(out); |
18544 | return out; |
18545 | } |
18546 | } |
18547 | |
18548 | at::Tensor & special_zeta_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
18549 | if (false) { |
18550 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18551 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
18553 | auto self_meta = to_meta(self); |
18554 | auto other_meta = to_meta(other); |
18555 | auto out_meta = to_meta(out); |
18556 | at::AutoDispatchSkipFunctionalize func_guard; |
18557 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18558 | at::_ops::special_zeta_out::call(self_meta, other_meta, out_meta); |
18559 | } |
18560 | |
18561 | at::Tensor self_; |
18562 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18563 | at::functionalization::impl::sync(self); |
18564 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18565 | } else { |
18566 | self_ = self; |
18567 | } |
18568 | |
18569 | at::Tensor other_; |
18570 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
18571 | at::functionalization::impl::sync(other); |
18572 | other_ = at::functionalization::impl::from_functional_tensor(other); |
18573 | } else { |
18574 | other_ = other; |
18575 | } |
18576 | |
18577 | at::Tensor out_; |
18578 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18579 | at::functionalization::impl::sync(out); |
18580 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18581 | } else { |
18582 | out_ = out; |
18583 | } |
18584 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18585 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
  "mutating a non-functional tensor with a functional tensor is not allowed.",
  " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18590 | } else { |
18591 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18592 | at::AutoDispatchSkipFunctionalize guard; |
18593 | at::Tensor tmp_output = at::_ops::special_zeta_out::call(self_, other_, out_); |
return out;
18595 | } |
18596 | } else { |
18597 | at::Tensor tmp_output; |
18598 | { |
18599 | at::AutoDispatchSkipFunctionalize guard; |
18600 | tmp_output = at::_ops::special_zeta::call(self_, other_); |
18601 | } |
18602 | at::functionalization::impl::replace_(out, tmp_output); |
18603 | at::functionalization::impl::commit_update(out); |
18604 | at::functionalization::impl::sync(out); |
18605 | return out; |
18606 | } |
18607 | } |
18608 | |
18609 | at::Tensor & special_zeta_out_self_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) { |
18610 | if (false) { |
18611 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18612 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
18614 | auto other_meta = to_meta(other); |
18615 | auto out_meta = to_meta(out); |
18616 | at::AutoDispatchSkipFunctionalize func_guard; |
18617 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18618 | at::_ops::special_zeta_self_scalar_out::call(self, other_meta, out_meta); |
18619 | } |
18620 | |
18621 | at::Tensor other_; |
18622 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
18623 | at::functionalization::impl::sync(other); |
18624 | other_ = at::functionalization::impl::from_functional_tensor(other); |
18625 | } else { |
18626 | other_ = other; |
18627 | } |
18628 | |
18629 | at::Tensor out_; |
18630 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18631 | at::functionalization::impl::sync(out); |
18632 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18633 | } else { |
18634 | out_ = out; |
18635 | } |
18636 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18637 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
  "mutating a non-functional tensor with a functional tensor is not allowed.",
  " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18642 | } else { |
18643 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18644 | at::AutoDispatchSkipFunctionalize guard; |
18645 | at::Tensor tmp_output = at::_ops::special_zeta_self_scalar_out::call(self, other_, out_); |
return out;
18647 | } |
18648 | } else { |
18649 | at::Tensor tmp_output; |
18650 | { |
18651 | at::AutoDispatchSkipFunctionalize guard; |
18652 | tmp_output = at::_ops::special_zeta_self_scalar::call(self, other_); |
18653 | } |
18654 | at::functionalization::impl::replace_(out, tmp_output); |
18655 | at::functionalization::impl::commit_update(out); |
18656 | at::functionalization::impl::sync(out); |
18657 | return out; |
18658 | } |
18659 | } |
18660 | |
18661 | at::Tensor & special_zeta_out_other_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
18662 | if (false) { |
18663 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18664 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
18666 | auto self_meta = to_meta(self); |
18667 | auto out_meta = to_meta(out); |
18668 | at::AutoDispatchSkipFunctionalize func_guard; |
18669 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18670 | at::_ops::special_zeta_other_scalar_out::call(self_meta, other, out_meta); |
18671 | } |
18672 | |
18673 | at::Tensor self_; |
18674 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18675 | at::functionalization::impl::sync(self); |
18676 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18677 | } else { |
18678 | self_ = self; |
18679 | } |
18680 | |
18681 | at::Tensor out_; |
18682 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18683 | at::functionalization::impl::sync(out); |
18684 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18685 | } else { |
18686 | out_ = out; |
18687 | } |
18688 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18689 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
  "mutating a non-functional tensor with a functional tensor is not allowed.",
  " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18694 | } else { |
18695 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18696 | at::AutoDispatchSkipFunctionalize guard; |
18697 | at::Tensor tmp_output = at::_ops::special_zeta_other_scalar_out::call(self_, other, out_); |
return out;
18699 | } |
18700 | } else { |
18701 | at::Tensor tmp_output; |
18702 | { |
18703 | at::AutoDispatchSkipFunctionalize guard; |
18704 | tmp_output = at::_ops::special_zeta_other_scalar::call(self_, other); |
18705 | } |
18706 | at::functionalization::impl::replace_(out, tmp_output); |
18707 | at::functionalization::impl::commit_update(out); |
18708 | at::functionalization::impl::sync(out); |
18709 | return out; |
18710 | } |
18711 | } |
18712 | |
18713 | at::Tensor & special_i0_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
18714 | if (false) { |
18715 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18716 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
18718 | auto self_meta = to_meta(self); |
18719 | auto out_meta = to_meta(out); |
18720 | at::AutoDispatchSkipFunctionalize func_guard; |
18721 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18722 | at::_ops::special_i0_out::call(self_meta, out_meta); |
18723 | } |
18724 | |
18725 | at::Tensor self_; |
18726 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18727 | at::functionalization::impl::sync(self); |
18728 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18729 | } else { |
18730 | self_ = self; |
18731 | } |
18732 | |
18733 | at::Tensor out_; |
18734 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18735 | at::functionalization::impl::sync(out); |
18736 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18737 | } else { |
18738 | out_ = out; |
18739 | } |
18740 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18741 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
  "mutating a non-functional tensor with a functional tensor is not allowed.",
  " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18746 | } else { |
18747 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18748 | at::AutoDispatchSkipFunctionalize guard; |
18749 | at::Tensor tmp_output = at::_ops::special_i0_out::call(self_, out_); |
return out;
18751 | } |
18752 | } else { |
18753 | at::Tensor tmp_output; |
18754 | { |
18755 | at::AutoDispatchSkipFunctionalize guard; |
18756 | tmp_output = at::_ops::special_i0::call(self_); |
18757 | } |
18758 | at::functionalization::impl::replace_(out, tmp_output); |
18759 | at::functionalization::impl::commit_update(out); |
18760 | at::functionalization::impl::sync(out); |
18761 | return out; |
18762 | } |
18763 | } |
18764 | |
18765 | at::Tensor & special_i0e_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
18766 | if (false) { |
18767 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18768 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
18770 | auto self_meta = to_meta(self); |
18771 | auto out_meta = to_meta(out); |
18772 | at::AutoDispatchSkipFunctionalize func_guard; |
18773 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18774 | at::_ops::special_i0e_out::call(self_meta, out_meta); |
18775 | } |
18776 | |
18777 | at::Tensor self_; |
18778 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18779 | at::functionalization::impl::sync(self); |
18780 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18781 | } else { |
18782 | self_ = self; |
18783 | } |
18784 | |
18785 | at::Tensor out_; |
18786 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18787 | at::functionalization::impl::sync(out); |
18788 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18789 | } else { |
18790 | out_ = out; |
18791 | } |
18792 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18793 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
  "mutating a non-functional tensor with a functional tensor is not allowed.",
  " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18798 | } else { |
18799 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18800 | at::AutoDispatchSkipFunctionalize guard; |
18801 | at::Tensor tmp_output = at::_ops::special_i0e_out::call(self_, out_); |
return out;
18803 | } |
18804 | } else { |
18805 | at::Tensor tmp_output; |
18806 | { |
18807 | at::AutoDispatchSkipFunctionalize guard; |
18808 | tmp_output = at::_ops::special_i0e::call(self_); |
18809 | } |
18810 | at::functionalization::impl::replace_(out, tmp_output); |
18811 | at::functionalization::impl::commit_update(out); |
18812 | at::functionalization::impl::sync(out); |
18813 | return out; |
18814 | } |
18815 | } |
18816 | |
18817 | at::Tensor & special_expit_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
18818 | if (false) { |
18819 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18820 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
18822 | auto self_meta = to_meta(self); |
18823 | auto out_meta = to_meta(out); |
18824 | at::AutoDispatchSkipFunctionalize func_guard; |
18825 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18826 | at::_ops::special_expit_out::call(self_meta, out_meta); |
18827 | } |
18828 | |
18829 | at::Tensor self_; |
18830 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18831 | at::functionalization::impl::sync(self); |
18832 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18833 | } else { |
18834 | self_ = self; |
18835 | } |
18836 | |
18837 | at::Tensor out_; |
18838 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18839 | at::functionalization::impl::sync(out); |
18840 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18841 | } else { |
18842 | out_ = out; |
18843 | } |
18844 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18845 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
  "mutating a non-functional tensor with a functional tensor is not allowed.",
  " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18850 | } else { |
18851 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18852 | at::AutoDispatchSkipFunctionalize guard; |
18853 | at::Tensor tmp_output = at::_ops::special_expit_out::call(self_, out_); |
return out;
18855 | } |
18856 | } else { |
18857 | at::Tensor tmp_output; |
18858 | { |
18859 | at::AutoDispatchSkipFunctionalize guard; |
18860 | tmp_output = at::_ops::special_expit::call(self_); |
18861 | } |
18862 | at::functionalization::impl::replace_(out, tmp_output); |
18863 | at::functionalization::impl::commit_update(out); |
18864 | at::functionalization::impl::sync(out); |
18865 | return out; |
18866 | } |
18867 | } |
18868 | |
18869 | at::Tensor & special_round_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t decimals, at::Tensor & out) { |
18870 | if (false) { |
18871 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18872 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
18874 | auto self_meta = to_meta(self); |
18875 | auto out_meta = to_meta(out); |
18876 | at::AutoDispatchSkipFunctionalize func_guard; |
18877 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18878 | at::_ops::special_round_out::call(self_meta, decimals, out_meta); |
18879 | } |
18880 | |
18881 | at::Tensor self_; |
18882 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18883 | at::functionalization::impl::sync(self); |
18884 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18885 | } else { |
18886 | self_ = self; |
18887 | } |
18888 | |
18889 | at::Tensor out_; |
18890 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18891 | at::functionalization::impl::sync(out); |
18892 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18893 | } else { |
18894 | out_ = out; |
18895 | } |
18896 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18897 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
  "mutating a non-functional tensor with a functional tensor is not allowed.",
  " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18902 | } else { |
18903 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18904 | at::AutoDispatchSkipFunctionalize guard; |
18905 | at::Tensor tmp_output = at::_ops::special_round_out::call(self_, decimals, out_); |
          return out;
18907 | } |
18908 | } else { |
18909 | at::Tensor tmp_output; |
18910 | { |
18911 | at::AutoDispatchSkipFunctionalize guard; |
18912 | tmp_output = at::_ops::special_round::call(self_, decimals); |
18913 | } |
18914 | at::functionalization::impl::replace_(out, tmp_output); |
18915 | at::functionalization::impl::commit_update(out); |
18916 | at::functionalization::impl::sync(out); |
18917 | return out; |
18918 | } |
18919 | } |
18920 | |
18921 | at::Tensor & special_gammainc_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
18922 | if (false) { |
18923 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18924 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
18926 | auto self_meta = to_meta(self); |
18927 | auto other_meta = to_meta(other); |
18928 | auto out_meta = to_meta(out); |
18929 | at::AutoDispatchSkipFunctionalize func_guard; |
18930 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18931 | at::_ops::special_gammainc_out::call(self_meta, other_meta, out_meta); |
18932 | } |
18933 | |
18934 | at::Tensor self_; |
18935 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18936 | at::functionalization::impl::sync(self); |
18937 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18938 | } else { |
18939 | self_ = self; |
18940 | } |
18941 | |
18942 | at::Tensor other_; |
18943 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
18944 | at::functionalization::impl::sync(other); |
18945 | other_ = at::functionalization::impl::from_functional_tensor(other); |
18946 | } else { |
18947 | other_ = other; |
18948 | } |
18949 | |
18950 | at::Tensor out_; |
18951 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18952 | at::functionalization::impl::sync(out); |
18953 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18954 | } else { |
18955 | out_ = out; |
18956 | } |
18957 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18958 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18960 | TORCH_INTERNAL_ASSERT(false, |
18961 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18962 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18963 | } else { |
18964 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18965 | at::AutoDispatchSkipFunctionalize guard; |
18966 | at::Tensor tmp_output = at::_ops::special_gammainc_out::call(self_, other_, out_); |
          return out;
18968 | } |
18969 | } else { |
18970 | at::Tensor tmp_output; |
18971 | { |
18972 | at::AutoDispatchSkipFunctionalize guard; |
18973 | tmp_output = at::_ops::special_gammainc::call(self_, other_); |
18974 | } |
18975 | at::functionalization::impl::replace_(out, tmp_output); |
18976 | at::functionalization::impl::commit_update(out); |
18977 | at::functionalization::impl::sync(out); |
18978 | return out; |
18979 | } |
18980 | } |
18981 | |
18982 | at::Tensor & fft_irfft_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) { |
18983 | if (false) { |
18984 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18985 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
18987 | auto self_meta = to_meta(self); |
18988 | auto out_meta = to_meta(out); |
18989 | at::AutoDispatchSkipFunctionalize func_guard; |
18990 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18991 | at::_ops::fft_irfft_out::call(self_meta, n, dim, norm, out_meta); |
18992 | } |
18993 | |
18994 | at::Tensor self_; |
18995 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18996 | at::functionalization::impl::sync(self); |
18997 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18998 | } else { |
18999 | self_ = self; |
19000 | } |
19001 | |
19002 | at::Tensor out_; |
19003 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19004 | at::functionalization::impl::sync(out); |
19005 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19006 | } else { |
19007 | out_ = out; |
19008 | } |
19009 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19010 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19012 | TORCH_INTERNAL_ASSERT(false, |
19013 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19014 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19015 | } else { |
19016 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19017 | at::AutoDispatchSkipFunctionalize guard; |
19018 | at::Tensor tmp_output = at::_ops::fft_irfft_out::call(self_, n, dim, norm, out_); |
          return out;
19020 | } |
19021 | } else { |
19022 | at::Tensor tmp_output; |
19023 | { |
19024 | at::AutoDispatchSkipFunctionalize guard; |
19025 | tmp_output = at::_ops::fft_irfft::call(self_, n, dim, norm); |
19026 | } |
19027 | at::functionalization::impl::replace_(out, tmp_output); |
19028 | at::functionalization::impl::commit_update(out); |
19029 | at::functionalization::impl::sync(out); |
19030 | return out; |
19031 | } |
19032 | } |
19033 | |
19034 | at::Tensor & fft_fft2_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) { |
19035 | if (false) { |
19036 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19037 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
19039 | auto self_meta = to_meta(self); |
19040 | auto out_meta = to_meta(out); |
19041 | at::AutoDispatchSkipFunctionalize func_guard; |
19042 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19043 | at::_ops::fft_fft2_out::call(self_meta, s, dim, norm, out_meta); |
19044 | } |
19045 | |
19046 | at::Tensor self_; |
19047 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
19048 | at::functionalization::impl::sync(self); |
19049 | self_ = at::functionalization::impl::from_functional_tensor(self); |
19050 | } else { |
19051 | self_ = self; |
19052 | } |
19053 | |
19054 | at::Tensor out_; |
19055 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19056 | at::functionalization::impl::sync(out); |
19057 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19058 | } else { |
19059 | out_ = out; |
19060 | } |
19061 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19062 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19064 | TORCH_INTERNAL_ASSERT(false, |
19065 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19066 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19067 | } else { |
19068 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19069 | at::AutoDispatchSkipFunctionalize guard; |
19070 | at::Tensor tmp_output = at::_ops::fft_fft2_out::call(self_, s, dim, norm, out_); |
          return out;
19072 | } |
19073 | } else { |
19074 | at::Tensor tmp_output; |
19075 | { |
19076 | at::AutoDispatchSkipFunctionalize guard; |
19077 | tmp_output = at::_ops::fft_fft2::call(self_, s, dim, norm); |
19078 | } |
19079 | at::functionalization::impl::replace_(out, tmp_output); |
19080 | at::functionalization::impl::commit_update(out); |
19081 | at::functionalization::impl::sync(out); |
19082 | return out; |
19083 | } |
19084 | } |
19085 | |
19086 | at::Tensor & fft_rfftfreq_out_out(c10::DispatchKeySet dispatchKeySet, int64_t n, double d, at::Tensor & out) { |
19087 | if (false) { |
19088 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19089 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
19091 | auto out_meta = to_meta(out); |
19092 | at::AutoDispatchSkipFunctionalize func_guard; |
19093 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19094 | at::_ops::fft_rfftfreq_out::call(n, d, out_meta); |
19095 | } |
19096 | |
19097 | at::Tensor out_; |
19098 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19099 | at::functionalization::impl::sync(out); |
19100 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19101 | } else { |
19102 | out_ = out; |
19103 | } |
19104 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19105 | if ((false)) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19107 | TORCH_INTERNAL_ASSERT(false, |
19108 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19109 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19110 | } else { |
19111 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19112 | at::AutoDispatchSkipFunctionalize guard; |
19113 | at::Tensor tmp_output = at::_ops::fft_rfftfreq_out::call(n, d, out_); |
          return out;
19115 | } |
19116 | } else { |
19117 | at::Tensor tmp_output; |
19118 | { |
19119 | at::AutoDispatchSkipFunctionalize guard; |
19120 | tmp_output = at::_ops::fft_rfftfreq::call(n, d, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
19121 | } |
19122 | at::functionalization::impl::replace_(out, tmp_output); |
19123 | at::functionalization::impl::commit_update(out); |
19124 | at::functionalization::impl::sync(out); |
19125 | return out; |
19126 | } |
19127 | } |
19128 | |
19129 | at::Tensor & linalg_cholesky_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, at::Tensor & out) { |
19130 | if (false) { |
19131 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19132 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
19134 | auto self_meta = to_meta(self); |
19135 | auto out_meta = to_meta(out); |
19136 | at::AutoDispatchSkipFunctionalize func_guard; |
19137 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19138 | at::_ops::linalg_cholesky_out::call(self_meta, upper, out_meta); |
19139 | } |
19140 | |
19141 | at::Tensor self_; |
19142 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
19143 | at::functionalization::impl::sync(self); |
19144 | self_ = at::functionalization::impl::from_functional_tensor(self); |
19145 | } else { |
19146 | self_ = self; |
19147 | } |
19148 | |
19149 | at::Tensor out_; |
19150 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19151 | at::functionalization::impl::sync(out); |
19152 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19153 | } else { |
19154 | out_ = out; |
19155 | } |
19156 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19157 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19159 | TORCH_INTERNAL_ASSERT(false, |
19160 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19161 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19162 | } else { |
19163 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19164 | at::AutoDispatchSkipFunctionalize guard; |
19165 | at::Tensor tmp_output = at::_ops::linalg_cholesky_out::call(self_, upper, out_); |
          return out;
19167 | } |
19168 | } else { |
19169 | at::Tensor tmp_output; |
19170 | { |
19171 | at::AutoDispatchSkipFunctionalize guard; |
19172 | tmp_output = at::_ops::linalg_cholesky::call(self_, upper); |
19173 | } |
19174 | at::functionalization::impl::replace_(out, tmp_output); |
19175 | at::functionalization::impl::commit_update(out); |
19176 | at::functionalization::impl::sync(out); |
19177 | return out; |
19178 | } |
19179 | } |
19180 | |
19181 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot, at::Tensor & P, at::Tensor & L, at::Tensor & U) { |
19182 | if (false) { |
19183 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19184 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
19186 | auto A_meta = to_meta(A); |
19187 | auto P_meta = to_meta(P); |
19188 | auto L_meta = to_meta(L); |
19189 | auto U_meta = to_meta(U); |
19190 | at::AutoDispatchSkipFunctionalize func_guard; |
19191 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19192 | at::_ops::linalg_lu_out::call(A_meta, pivot, P_meta, L_meta, U_meta); |
19193 | } |
19194 | |
19195 | at::Tensor A_; |
19196 | if (at::functionalization::impl::isFunctionalTensor(A)) { |
19197 | at::functionalization::impl::sync(A); |
19198 | A_ = at::functionalization::impl::from_functional_tensor(A); |
19199 | } else { |
19200 | A_ = A; |
19201 | } |
19202 | |
19203 | at::Tensor P_; |
19204 | if (at::functionalization::impl::isFunctionalTensor(P)) { |
19205 | at::functionalization::impl::sync(P); |
19206 | P_ = at::functionalization::impl::from_functional_tensor(P); |
19207 | } else { |
19208 | P_ = P; |
19209 | } |
19210 | |
19211 | at::Tensor L_; |
19212 | if (at::functionalization::impl::isFunctionalTensor(L)) { |
19213 | at::functionalization::impl::sync(L); |
19214 | L_ = at::functionalization::impl::from_functional_tensor(L); |
19215 | } else { |
19216 | L_ = L; |
19217 | } |
19218 | |
19219 | at::Tensor U_; |
19220 | if (at::functionalization::impl::isFunctionalTensor(U)) { |
19221 | at::functionalization::impl::sync(U); |
19222 | U_ = at::functionalization::impl::from_functional_tensor(U); |
19223 | } else { |
19224 | U_ = U; |
19225 | } |
19226 | if (!(true && at::functionalization::impl::isFunctionalTensor(P) && at::functionalization::impl::isFunctionalTensor(L) && at::functionalization::impl::isFunctionalTensor(U))) { |
19227 | if ((false || at::functionalization::impl::isFunctionalTensor(A))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19229 | TORCH_INTERNAL_ASSERT(false, |
19230 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19231 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19232 | } else { |
19233 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19234 | at::AutoDispatchSkipFunctionalize guard; |
19235 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::linalg_lu_out::call(A_, pivot, P_, L_, U_); |
          return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(P, L, U);
19237 | } |
19238 | } else { |
19239 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output; |
19240 | { |
19241 | at::AutoDispatchSkipFunctionalize guard; |
19242 | tmp_output = at::_ops::linalg_lu::call(A_, pivot); |
19243 | } |
19244 | at::functionalization::impl::replace_(P, std::get<0>(tmp_output)); |
19245 | at::functionalization::impl::commit_update(P); |
19246 | at::functionalization::impl::sync(P); |
19247 | at::functionalization::impl::replace_(L, std::get<1>(tmp_output)); |
19248 | at::functionalization::impl::commit_update(L); |
19249 | at::functionalization::impl::sync(L); |
19250 | at::functionalization::impl::replace_(U, std::get<2>(tmp_output)); |
19251 | at::functionalization::impl::commit_update(U); |
19252 | at::functionalization::impl::sync(U); |
19253 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(P, L, U); |
19254 | } |
19255 | } |
19256 | |
19257 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_det_out_result(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & result, at::Tensor & LU, at::Tensor & pivots) { |
19258 | if (false) { |
19259 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19260 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
19262 | auto A_meta = to_meta(A); |
19263 | auto result_meta = to_meta(result); |
19264 | auto LU_meta = to_meta(LU); |
19265 | auto pivots_meta = to_meta(pivots); |
19266 | at::AutoDispatchSkipFunctionalize func_guard; |
19267 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19268 | at::_ops::_linalg_det_result::call(A_meta, result_meta, LU_meta, pivots_meta); |
19269 | } |
19270 | |
19271 | at::Tensor A_; |
19272 | if (at::functionalization::impl::isFunctionalTensor(A)) { |
19273 | at::functionalization::impl::sync(A); |
19274 | A_ = at::functionalization::impl::from_functional_tensor(A); |
19275 | } else { |
19276 | A_ = A; |
19277 | } |
19278 | |
19279 | at::Tensor result_; |
19280 | if (at::functionalization::impl::isFunctionalTensor(result)) { |
19281 | at::functionalization::impl::sync(result); |
19282 | result_ = at::functionalization::impl::from_functional_tensor(result); |
19283 | } else { |
19284 | result_ = result; |
19285 | } |
19286 | |
19287 | at::Tensor LU_; |
19288 | if (at::functionalization::impl::isFunctionalTensor(LU)) { |
19289 | at::functionalization::impl::sync(LU); |
19290 | LU_ = at::functionalization::impl::from_functional_tensor(LU); |
19291 | } else { |
19292 | LU_ = LU; |
19293 | } |
19294 | |
19295 | at::Tensor pivots_; |
19296 | if (at::functionalization::impl::isFunctionalTensor(pivots)) { |
19297 | at::functionalization::impl::sync(pivots); |
19298 | pivots_ = at::functionalization::impl::from_functional_tensor(pivots); |
19299 | } else { |
19300 | pivots_ = pivots; |
19301 | } |
19302 | if (!(true && at::functionalization::impl::isFunctionalTensor(result) && at::functionalization::impl::isFunctionalTensor(LU) && at::functionalization::impl::isFunctionalTensor(pivots))) { |
19303 | if ((false || at::functionalization::impl::isFunctionalTensor(A))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19305 | TORCH_INTERNAL_ASSERT(false, |
19306 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19307 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19308 | } else { |
19309 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19310 | at::AutoDispatchSkipFunctionalize guard; |
19311 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_linalg_det_result::call(A_, result_, LU_, pivots_); |
          return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(result, LU, pivots);
19313 | } |
19314 | } else { |
19315 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output; |
19316 | { |
19317 | at::AutoDispatchSkipFunctionalize guard; |
19318 | tmp_output = at::_ops::_linalg_det::call(A_); |
19319 | } |
19320 | at::functionalization::impl::replace_(result, std::get<0>(tmp_output)); |
19321 | at::functionalization::impl::commit_update(result); |
19322 | at::functionalization::impl::sync(result); |
19323 | at::functionalization::impl::replace_(LU, std::get<1>(tmp_output)); |
19324 | at::functionalization::impl::commit_update(LU); |
19325 | at::functionalization::impl::sync(LU); |
19326 | at::functionalization::impl::replace_(pivots, std::get<2>(tmp_output)); |
19327 | at::functionalization::impl::commit_update(pivots); |
19328 | at::functionalization::impl::sync(pivots); |
19329 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(result, LU, pivots); |
19330 | } |
19331 | } |
19332 | |
19333 | ::std::tuple<at::Tensor &,at::Tensor &> slogdet_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & sign, at::Tensor & logabsdet) { |
19334 | if (false) { |
19335 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19336 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
19338 | auto self_meta = to_meta(self); |
19339 | auto sign_meta = to_meta(sign); |
19340 | auto logabsdet_meta = to_meta(logabsdet); |
19341 | at::AutoDispatchSkipFunctionalize func_guard; |
19342 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19343 | at::_ops::slogdet_out::call(self_meta, sign_meta, logabsdet_meta); |
19344 | } |
19345 | |
19346 | at::Tensor self_; |
19347 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
19348 | at::functionalization::impl::sync(self); |
19349 | self_ = at::functionalization::impl::from_functional_tensor(self); |
19350 | } else { |
19351 | self_ = self; |
19352 | } |
19353 | |
19354 | at::Tensor sign_; |
19355 | if (at::functionalization::impl::isFunctionalTensor(sign)) { |
19356 | at::functionalization::impl::sync(sign); |
19357 | sign_ = at::functionalization::impl::from_functional_tensor(sign); |
19358 | } else { |
19359 | sign_ = sign; |
19360 | } |
19361 | |
19362 | at::Tensor logabsdet_; |
19363 | if (at::functionalization::impl::isFunctionalTensor(logabsdet)) { |
19364 | at::functionalization::impl::sync(logabsdet); |
19365 | logabsdet_ = at::functionalization::impl::from_functional_tensor(logabsdet); |
19366 | } else { |
19367 | logabsdet_ = logabsdet; |
19368 | } |
19369 | if (!(true && at::functionalization::impl::isFunctionalTensor(sign) && at::functionalization::impl::isFunctionalTensor(logabsdet))) { |
19370 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19372 | TORCH_INTERNAL_ASSERT(false, |
19373 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19374 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19375 | } else { |
19376 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19377 | at::AutoDispatchSkipFunctionalize guard; |
19378 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::slogdet_out::call(self_, sign_, logabsdet_); |
          return ::std::tuple<at::Tensor &,at::Tensor &>(sign, logabsdet);
19380 | } |
19381 | } else { |
19382 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
19383 | { |
19384 | at::AutoDispatchSkipFunctionalize guard; |
19385 | tmp_output = at::_ops::slogdet::call(self_); |
19386 | } |
19387 | at::functionalization::impl::replace_(sign, std::get<0>(tmp_output)); |
19388 | at::functionalization::impl::commit_update(sign); |
19389 | at::functionalization::impl::sync(sign); |
19390 | at::functionalization::impl::replace_(logabsdet, std::get<1>(tmp_output)); |
19391 | at::functionalization::impl::commit_update(logabsdet); |
19392 | at::functionalization::impl::sync(logabsdet); |
19393 | return ::std::tuple<at::Tensor &,at::Tensor &>(sign, logabsdet); |
19394 | } |
19395 | } |
19396 | |
19397 | ::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & eigenvalues, at::Tensor & eigenvectors) { |
19398 | if (false) { |
19399 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19400 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
19402 | auto self_meta = to_meta(self); |
19403 | auto eigenvalues_meta = to_meta(eigenvalues); |
19404 | auto eigenvectors_meta = to_meta(eigenvectors); |
19405 | at::AutoDispatchSkipFunctionalize func_guard; |
19406 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19407 | at::_ops::linalg_eig_out::call(self_meta, eigenvalues_meta, eigenvectors_meta); |
19408 | } |
19409 | |
19410 | at::Tensor self_; |
19411 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
19412 | at::functionalization::impl::sync(self); |
19413 | self_ = at::functionalization::impl::from_functional_tensor(self); |
19414 | } else { |
19415 | self_ = self; |
19416 | } |
19417 | |
19418 | at::Tensor eigenvalues_; |
19419 | if (at::functionalization::impl::isFunctionalTensor(eigenvalues)) { |
19420 | at::functionalization::impl::sync(eigenvalues); |
19421 | eigenvalues_ = at::functionalization::impl::from_functional_tensor(eigenvalues); |
19422 | } else { |
19423 | eigenvalues_ = eigenvalues; |
19424 | } |
19425 | |
19426 | at::Tensor eigenvectors_; |
19427 | if (at::functionalization::impl::isFunctionalTensor(eigenvectors)) { |
19428 | at::functionalization::impl::sync(eigenvectors); |
19429 | eigenvectors_ = at::functionalization::impl::from_functional_tensor(eigenvectors); |
19430 | } else { |
19431 | eigenvectors_ = eigenvectors; |
19432 | } |
19433 | if (!(true && at::functionalization::impl::isFunctionalTensor(eigenvalues) && at::functionalization::impl::isFunctionalTensor(eigenvectors))) { |
19434 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19436 | TORCH_INTERNAL_ASSERT(false, |
19437 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19438 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19439 | } else { |
19440 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19441 | at::AutoDispatchSkipFunctionalize guard; |
19442 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::linalg_eig_out::call(self_, eigenvalues_, eigenvectors_); |
          return ::std::tuple<at::Tensor &,at::Tensor &>(eigenvalues, eigenvectors);
19444 | } |
19445 | } else { |
19446 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
19447 | { |
19448 | at::AutoDispatchSkipFunctionalize guard; |
19449 | tmp_output = at::_ops::linalg_eig::call(self_); |
19450 | } |
19451 | at::functionalization::impl::replace_(eigenvalues, std::get<0>(tmp_output)); |
19452 | at::functionalization::impl::commit_update(eigenvalues); |
19453 | at::functionalization::impl::sync(eigenvalues); |
19454 | at::functionalization::impl::replace_(eigenvectors, std::get<1>(tmp_output)); |
19455 | at::functionalization::impl::commit_update(eigenvectors); |
19456 | at::functionalization::impl::sync(eigenvectors); |
19457 | return ::std::tuple<at::Tensor &,at::Tensor &>(eigenvalues, eigenvectors); |
19458 | } |
19459 | } |
19460 | |
19461 | at::Tensor & linalg_inv_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & out) { |
19462 | if (false) { |
19463 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19464 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
19466 | auto A_meta = to_meta(A); |
19467 | auto out_meta = to_meta(out); |
19468 | at::AutoDispatchSkipFunctionalize func_guard; |
19469 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19470 | at::_ops::linalg_inv_out::call(A_meta, out_meta); |
19471 | } |
19472 | |
19473 | at::Tensor A_; |
19474 | if (at::functionalization::impl::isFunctionalTensor(A)) { |
19475 | at::functionalization::impl::sync(A); |
19476 | A_ = at::functionalization::impl::from_functional_tensor(A); |
19477 | } else { |
19478 | A_ = A; |
19479 | } |
19480 | |
19481 | at::Tensor out_; |
19482 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19483 | at::functionalization::impl::sync(out); |
19484 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19485 | } else { |
19486 | out_ = out; |
19487 | } |
19488 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19489 | if ((false || at::functionalization::impl::isFunctionalTensor(A))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19491 | TORCH_INTERNAL_ASSERT(false, |
19492 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19493 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19494 | } else { |
19495 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19496 | at::AutoDispatchSkipFunctionalize guard; |
19497 | at::Tensor tmp_output = at::_ops::linalg_inv_out::call(A_, out_); |
          return out;
19499 | } |
19500 | } else { |
19501 | at::Tensor tmp_output; |
19502 | { |
19503 | at::AutoDispatchSkipFunctionalize guard; |
19504 | tmp_output = at::_ops::linalg_inv::call(A_); |
19505 | } |
19506 | at::functionalization::impl::replace_(out, tmp_output); |
19507 | at::functionalization::impl::commit_update(out); |
19508 | at::functionalization::impl::sync(out); |
19509 | return out; |
19510 | } |
19511 | } |
19512 | |
19513 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_svd_out_U(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool full_matrices, bool compute_uv, c10::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh) { |
19514 | if (false) { |
19515 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19516 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
19518 | auto A_meta = to_meta(A); |
19519 | auto U_meta = to_meta(U); |
19520 | auto S_meta = to_meta(S); |
19521 | auto Vh_meta = to_meta(Vh); |
19522 | at::AutoDispatchSkipFunctionalize func_guard; |
19523 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19524 | at::_ops::_linalg_svd_U::call(A_meta, full_matrices, compute_uv, driver, U_meta, S_meta, Vh_meta); |
19525 | } |
19526 | |
19527 | at::Tensor A_; |
19528 | if (at::functionalization::impl::isFunctionalTensor(A)) { |
19529 | at::functionalization::impl::sync(A); |
19530 | A_ = at::functionalization::impl::from_functional_tensor(A); |
19531 | } else { |
19532 | A_ = A; |
19533 | } |
19534 | |
19535 | at::Tensor U_; |
19536 | if (at::functionalization::impl::isFunctionalTensor(U)) { |
19537 | at::functionalization::impl::sync(U); |
19538 | U_ = at::functionalization::impl::from_functional_tensor(U); |
19539 | } else { |
19540 | U_ = U; |
19541 | } |
19542 | |
19543 | at::Tensor S_; |
19544 | if (at::functionalization::impl::isFunctionalTensor(S)) { |
19545 | at::functionalization::impl::sync(S); |
19546 | S_ = at::functionalization::impl::from_functional_tensor(S); |
19547 | } else { |
19548 | S_ = S; |
19549 | } |
19550 | |
19551 | at::Tensor Vh_; |
19552 | if (at::functionalization::impl::isFunctionalTensor(Vh)) { |
19553 | at::functionalization::impl::sync(Vh); |
19554 | Vh_ = at::functionalization::impl::from_functional_tensor(Vh); |
19555 | } else { |
19556 | Vh_ = Vh; |
19557 | } |
19558 | if (!(true && at::functionalization::impl::isFunctionalTensor(U) && at::functionalization::impl::isFunctionalTensor(S) && at::functionalization::impl::isFunctionalTensor(Vh))) { |
19559 | if ((false || at::functionalization::impl::isFunctionalTensor(A))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19561 | TORCH_INTERNAL_ASSERT(false, |
19562 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19563 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19564 | } else { |
19565 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19566 | at::AutoDispatchSkipFunctionalize guard; |
19567 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_linalg_svd_U::call(A_, full_matrices, compute_uv, driver, U_, S_, Vh_); |
          return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(U, S, Vh);
19569 | } |
19570 | } else { |
19571 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output; |
19572 | { |
19573 | at::AutoDispatchSkipFunctionalize guard; |
19574 | tmp_output = at::_ops::_linalg_svd::call(A_, full_matrices, compute_uv, driver); |
19575 | } |
19576 | at::functionalization::impl::replace_(U, std::get<0>(tmp_output)); |
19577 | at::functionalization::impl::commit_update(U); |
19578 | at::functionalization::impl::sync(U); |
19579 | at::functionalization::impl::replace_(S, std::get<1>(tmp_output)); |
19580 | at::functionalization::impl::commit_update(S); |
19581 | at::functionalization::impl::sync(S); |
19582 | at::functionalization::impl::replace_(Vh, std::get<2>(tmp_output)); |
19583 | at::functionalization::impl::commit_update(Vh); |
19584 | at::functionalization::impl::sync(Vh); |
19585 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(U, S, Vh); |
19586 | } |
19587 | } |
19588 | |
19589 | at::Tensor & linalg_svdvals_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, c10::optional<c10::string_view> driver, at::Tensor & out) { |
19590 | if (false) { |
19591 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19592 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
19594 | auto A_meta = to_meta(A); |
19595 | auto out_meta = to_meta(out); |
19596 | at::AutoDispatchSkipFunctionalize func_guard; |
19597 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19598 | at::_ops::linalg_svdvals_out::call(A_meta, driver, out_meta); |
19599 | } |
19600 | |
19601 | at::Tensor A_; |
19602 | if (at::functionalization::impl::isFunctionalTensor(A)) { |
19603 | at::functionalization::impl::sync(A); |
19604 | A_ = at::functionalization::impl::from_functional_tensor(A); |
19605 | } else { |
19606 | A_ = A; |
19607 | } |
19608 | |
19609 | at::Tensor out_; |
19610 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19611 | at::functionalization::impl::sync(out); |
19612 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19613 | } else { |
19614 | out_ = out; |
19615 | } |
19616 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19617 | if ((false || at::functionalization::impl::isFunctionalTensor(A))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19619 | TORCH_INTERNAL_ASSERT(false, |
19620 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19621 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19622 | } else { |
19623 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19624 | at::AutoDispatchSkipFunctionalize guard; |
19625 | at::Tensor tmp_output = at::_ops::linalg_svdvals_out::call(A_, driver, out_); |
          return out;
19627 | } |
19628 | } else { |
19629 | at::Tensor tmp_output; |
19630 | { |
19631 | at::AutoDispatchSkipFunctionalize guard; |
19632 | tmp_output = at::_ops::linalg_svdvals::call(A_, driver); |
19633 | } |
19634 | at::functionalization::impl::replace_(out, tmp_output); |
19635 | at::functionalization::impl::commit_update(out); |
19636 | at::functionalization::impl::sync(out); |
19637 | return out; |
19638 | } |
19639 | } |
19640 | |
19641 | at::Tensor & linalg_pinv_out_atol_rtol_tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out) { |
19642 | if (false) { |
19643 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19644 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
19646 | auto self_meta = to_meta(self); |
19647 | auto atol_meta = to_meta(atol); |
19648 | auto rtol_meta = to_meta(rtol); |
19649 | auto out_meta = to_meta(out); |
19650 | at::AutoDispatchSkipFunctionalize func_guard; |
19651 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19652 | at::_ops::linalg_pinv_atol_rtol_tensor_out::call(self_meta, atol_meta, rtol_meta, hermitian, out_meta); |
19653 | } |
19654 | |
19655 | at::Tensor self_; |
19656 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
19657 | at::functionalization::impl::sync(self); |
19658 | self_ = at::functionalization::impl::from_functional_tensor(self); |
19659 | } else { |
19660 | self_ = self; |
19661 | } |
19662 | |
19663 | c10::optional<at::Tensor> atol_; |
19664 | if (at::functionalization::impl::isFunctionalTensor(atol)) { |
19665 | at::functionalization::impl::sync(atol); |
19666 | atol_ = at::functionalization::impl::from_functional_tensor(atol); |
19667 | } else { |
19668 | atol_ = atol; |
19669 | } |
19670 | |
19671 | c10::optional<at::Tensor> rtol_; |
19672 | if (at::functionalization::impl::isFunctionalTensor(rtol)) { |
19673 | at::functionalization::impl::sync(rtol); |
19674 | rtol_ = at::functionalization::impl::from_functional_tensor(rtol); |
19675 | } else { |
19676 | rtol_ = rtol; |
19677 | } |
19678 | |
19679 | at::Tensor out_; |
19680 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19681 | at::functionalization::impl::sync(out); |
19682 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19683 | } else { |
19684 | out_ = out; |
19685 | } |
19686 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19687 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(atol) || at::functionalization::impl::isFunctionalTensor(rtol))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19689 | TORCH_INTERNAL_ASSERT(false, |
19690 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19691 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19692 | } else { |
19693 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19694 | at::AutoDispatchSkipFunctionalize guard; |
19695 | at::Tensor tmp_output = at::_ops::linalg_pinv_atol_rtol_tensor_out::call(self_, atol_, rtol_, hermitian, out_); |
          return out;
19697 | } |
19698 | } else { |
19699 | at::Tensor tmp_output; |
19700 | { |
19701 | at::AutoDispatchSkipFunctionalize guard; |
19702 | tmp_output = at::_ops::linalg_pinv_atol_rtol_tensor::call(self_, atol_, rtol_, hermitian); |
19703 | } |
19704 | at::functionalization::impl::replace_(out, tmp_output); |
19705 | at::functionalization::impl::commit_update(out); |
19706 | at::functionalization::impl::sync(out); |
19707 | return out; |
19708 | } |
19709 | } |
19710 | |
19711 | at::Tensor & linalg_pinv_out_atol_rtol_float_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian, at::Tensor & out) { |
19712 | if (false) { |
19713 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19714 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
19716 | auto self_meta = to_meta(self); |
19717 | auto out_meta = to_meta(out); |
19718 | at::AutoDispatchSkipFunctionalize func_guard; |
19719 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19720 | at::_ops::linalg_pinv_atol_rtol_float_out::call(self_meta, atol, rtol, hermitian, out_meta); |
19721 | } |
19722 | |
19723 | at::Tensor self_; |
19724 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
19725 | at::functionalization::impl::sync(self); |
19726 | self_ = at::functionalization::impl::from_functional_tensor(self); |
19727 | } else { |
19728 | self_ = self; |
19729 | } |
19730 | |
19731 | at::Tensor out_; |
19732 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19733 | at::functionalization::impl::sync(out); |
19734 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19735 | } else { |
19736 | out_ = out; |
19737 | } |
19738 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19739 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19741 | TORCH_INTERNAL_ASSERT(false, |
19742 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19743 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19744 | } else { |
19745 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19746 | at::AutoDispatchSkipFunctionalize guard; |
19747 | at::Tensor tmp_output = at::_ops::linalg_pinv_atol_rtol_float_out::call(self_, atol, rtol, hermitian, out_); |
          return out;
19749 | } |
19750 | } else { |
19751 | at::Tensor tmp_output; |
19752 | { |
19753 | at::AutoDispatchSkipFunctionalize guard; |
19754 | tmp_output = at::_ops::linalg_pinv_atol_rtol_float::call(self_, atol, rtol, hermitian); |
19755 | } |
19756 | at::functionalization::impl::replace_(out, tmp_output); |
19757 | at::functionalization::impl::commit_update(out); |
19758 | at::functionalization::impl::sync(out); |
19759 | return out; |
19760 | } |
19761 | } |
19762 | |
19763 | at::Tensor & linalg_pinv_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double rcond, bool hermitian, at::Tensor & out) { |
19764 | if (false) { |
19765 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19766 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
19768 | auto self_meta = to_meta(self); |
19769 | auto out_meta = to_meta(out); |
19770 | at::AutoDispatchSkipFunctionalize func_guard; |
19771 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19772 | at::_ops::linalg_pinv_out::call(self_meta, rcond, hermitian, out_meta); |
19773 | } |
19774 | |
19775 | at::Tensor self_; |
19776 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
19777 | at::functionalization::impl::sync(self); |
19778 | self_ = at::functionalization::impl::from_functional_tensor(self); |
19779 | } else { |
19780 | self_ = self; |
19781 | } |
19782 | |
19783 | at::Tensor out_; |
19784 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19785 | at::functionalization::impl::sync(out); |
19786 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19787 | } else { |
19788 | out_ = out; |
19789 | } |
19790 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19791 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19793 | TORCH_INTERNAL_ASSERT(false, |
19794 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19795 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19796 | } else { |
19797 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19798 | at::AutoDispatchSkipFunctionalize guard; |
19799 | at::Tensor tmp_output = at::_ops::linalg_pinv_out::call(self_, rcond, hermitian, out_); |
          return out;
19801 | } |
19802 | } else { |
19803 | at::Tensor tmp_output; |
19804 | { |
19805 | at::AutoDispatchSkipFunctionalize guard; |
19806 | tmp_output = at::_ops::linalg_pinv::call(self_, rcond, hermitian); |
19807 | } |
19808 | at::functionalization::impl::replace_(out, tmp_output); |
19809 | at::functionalization::impl::commit_update(out); |
19810 | at::functionalization::impl::sync(out); |
19811 | return out; |
19812 | } |
19813 | } |
19814 | |
19815 | at::Tensor & linalg_pinv_out_out_rcond_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & rcond, bool hermitian, at::Tensor & out) { |
19816 | if (false) { |
19817 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19818 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
19820 | auto self_meta = to_meta(self); |
19821 | auto rcond_meta = to_meta(rcond); |
19822 | auto out_meta = to_meta(out); |
19823 | at::AutoDispatchSkipFunctionalize func_guard; |
19824 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19825 | at::_ops::linalg_pinv_out_rcond_tensor::call(self_meta, rcond_meta, hermitian, out_meta); |
19826 | } |
19827 | |
19828 | at::Tensor self_; |
19829 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
19830 | at::functionalization::impl::sync(self); |
19831 | self_ = at::functionalization::impl::from_functional_tensor(self); |
19832 | } else { |
19833 | self_ = self; |
19834 | } |
19835 | |
19836 | at::Tensor rcond_; |
19837 | if (at::functionalization::impl::isFunctionalTensor(rcond)) { |
19838 | at::functionalization::impl::sync(rcond); |
19839 | rcond_ = at::functionalization::impl::from_functional_tensor(rcond); |
19840 | } else { |
19841 | rcond_ = rcond; |
19842 | } |
19843 | |
19844 | at::Tensor out_; |
19845 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19846 | at::functionalization::impl::sync(out); |
19847 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19848 | } else { |
19849 | out_ = out; |
19850 | } |
19851 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19852 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(rcond))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19854 | TORCH_INTERNAL_ASSERT(false, |
19855 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19856 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19857 | } else { |
19858 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19859 | at::AutoDispatchSkipFunctionalize guard; |
19860 | at::Tensor tmp_output = at::_ops::linalg_pinv_out_rcond_tensor::call(self_, rcond_, hermitian, out_); |
          return out;
19862 | } |
19863 | } else { |
19864 | at::Tensor tmp_output; |
19865 | { |
19866 | at::AutoDispatchSkipFunctionalize guard; |
19867 | tmp_output = at::_ops::linalg_pinv_rcond_tensor::call(self_, rcond_, hermitian); |
19868 | } |
19869 | at::functionalization::impl::replace_(out, tmp_output); |
19870 | at::functionalization::impl::commit_update(out); |
19871 | at::functionalization::impl::sync(out); |
19872 | return out; |
19873 | } |
19874 | } |
19875 | |
19876 | ::std::tuple<at::Tensor &,at::Tensor &> linalg_qr_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, c10::string_view mode, at::Tensor & Q, at::Tensor & R) { |
19877 | if (false) { |
19878 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19879 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
19881 | auto A_meta = to_meta(A); |
19882 | auto Q_meta = to_meta(Q); |
19883 | auto R_meta = to_meta(R); |
19884 | at::AutoDispatchSkipFunctionalize func_guard; |
19885 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19886 | at::_ops::linalg_qr_out::call(A_meta, mode, Q_meta, R_meta); |
19887 | } |
19888 | |
19889 | at::Tensor A_; |
19890 | if (at::functionalization::impl::isFunctionalTensor(A)) { |
19891 | at::functionalization::impl::sync(A); |
19892 | A_ = at::functionalization::impl::from_functional_tensor(A); |
19893 | } else { |
19894 | A_ = A; |
19895 | } |
19896 | |
19897 | at::Tensor Q_; |
19898 | if (at::functionalization::impl::isFunctionalTensor(Q)) { |
19899 | at::functionalization::impl::sync(Q); |
19900 | Q_ = at::functionalization::impl::from_functional_tensor(Q); |
19901 | } else { |
19902 | Q_ = Q; |
19903 | } |
19904 | |
19905 | at::Tensor R_; |
19906 | if (at::functionalization::impl::isFunctionalTensor(R)) { |
19907 | at::functionalization::impl::sync(R); |
19908 | R_ = at::functionalization::impl::from_functional_tensor(R); |
19909 | } else { |
19910 | R_ = R; |
19911 | } |
19912 | if (!(true && at::functionalization::impl::isFunctionalTensor(Q) && at::functionalization::impl::isFunctionalTensor(R))) { |
19913 | if ((false || at::functionalization::impl::isFunctionalTensor(A))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19915 | TORCH_INTERNAL_ASSERT(false, |
19916 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19917 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19918 | } else { |
19919 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19920 | at::AutoDispatchSkipFunctionalize guard; |
19921 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::linalg_qr_out::call(A_, mode, Q_, R_); |
          return ::std::tuple<at::Tensor &,at::Tensor &>(Q, R);
19923 | } |
19924 | } else { |
19925 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
19926 | { |
19927 | at::AutoDispatchSkipFunctionalize guard; |
19928 | tmp_output = at::_ops::linalg_qr::call(A_, mode); |
19929 | } |
19930 | at::functionalization::impl::replace_(Q, std::get<0>(tmp_output)); |
19931 | at::functionalization::impl::commit_update(Q); |
19932 | at::functionalization::impl::sync(Q); |
19933 | at::functionalization::impl::replace_(R, std::get<1>(tmp_output)); |
19934 | at::functionalization::impl::commit_update(R); |
19935 | at::functionalization::impl::sync(R); |
19936 | return ::std::tuple<at::Tensor &,at::Tensor &>(Q, R); |
19937 | } |
19938 | } |
19939 | |
19940 | at::Tensor & _test_optional_intlist_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, at::OptionalIntArrayRef addends, at::Tensor & out) { |
19941 | if (false) { |
19942 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19943 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
19945 | auto values_meta = to_meta(values); |
19946 | auto out_meta = to_meta(out); |
19947 | at::AutoDispatchSkipFunctionalize func_guard; |
19948 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19949 | at::_ops::_test_optional_intlist_out::call(values_meta, addends, out_meta); |
19950 | } |
19951 | |
19952 | at::Tensor values_; |
19953 | if (at::functionalization::impl::isFunctionalTensor(values)) { |
19954 | at::functionalization::impl::sync(values); |
19955 | values_ = at::functionalization::impl::from_functional_tensor(values); |
19956 | } else { |
19957 | values_ = values; |
19958 | } |
19959 | |
19960 | at::Tensor out_; |
19961 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19962 | at::functionalization::impl::sync(out); |
19963 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19964 | } else { |
19965 | out_ = out; |
19966 | } |
19967 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19968 | if ((false || at::functionalization::impl::isFunctionalTensor(values))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19970 | TORCH_INTERNAL_ASSERT(false, |
19971 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19972 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19973 | } else { |
19974 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19975 | at::AutoDispatchSkipFunctionalize guard; |
19976 | at::Tensor tmp_output = at::_ops::_test_optional_intlist_out::call(values_, addends, out_); |
          return out;
19978 | } |
19979 | } else { |
19980 | at::Tensor tmp_output; |
19981 | { |
19982 | at::AutoDispatchSkipFunctionalize guard; |
19983 | tmp_output = at::_ops::_test_optional_intlist::call(values_, addends); |
19984 | } |
19985 | at::functionalization::impl::replace_(out, tmp_output); |
19986 | at::functionalization::impl::commit_update(out); |
19987 | at::functionalization::impl::sync(out); |
19988 | return out; |
19989 | } |
19990 | } |
19991 | |
19992 | at::Tensor & _fw_primal_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t level, at::Tensor & out) { |
19993 | if (false) { |
19994 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19995 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
19997 | auto self_meta = to_meta(self); |
19998 | auto out_meta = to_meta(out); |
19999 | at::AutoDispatchSkipFunctionalize func_guard; |
20000 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20001 | at::_ops::_fw_primal_copy_out::call(self_meta, level, out_meta); |
20002 | } |
20003 | |
20004 | at::Tensor self_; |
20005 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20006 | at::functionalization::impl::sync(self); |
20007 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20008 | } else { |
20009 | self_ = self; |
20010 | } |
20011 | |
20012 | at::Tensor out_; |
20013 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
20014 | at::functionalization::impl::sync(out); |
20015 | out_ = at::functionalization::impl::from_functional_tensor(out); |
20016 | } else { |
20017 | out_ = out; |
20018 | } |
      if (!at::functionalization::impl::isFunctionalTensor(out)) {
        if (at::functionalization::impl::isFunctionalTensor(self)) {
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
          // case 2: arguments are not functional tensors, so we no-op and redispatch.
          at::AutoDispatchSkipFunctionalize guard;
          at::_ops::_fw_primal_copy_out::call(self_, level, out_);
          return out;
        }
20030 | } |
20031 | } else { |
20032 | at::Tensor tmp_output; |
20033 | { |
20034 | at::AutoDispatchSkipFunctionalize guard; |
20035 | tmp_output = at::_ops::_fw_primal_copy::call(self_, level); |
20036 | } |
20037 | at::functionalization::impl::replace_(out, tmp_output); |
20038 | at::functionalization::impl::commit_update(out); |
20039 | at::functionalization::impl::sync(out); |
20040 | return out; |
20041 | } |
20042 | } |
20043 | |
20044 | at::Tensor & as_strided_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset, at::Tensor & out) { |
20045 | if (false) { |
20046 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20047 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
20049 | auto self_meta = to_meta(self); |
20050 | auto out_meta = to_meta(out); |
20051 | at::AutoDispatchSkipFunctionalize func_guard; |
20052 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20053 | at::_ops::as_strided_copy_out::call(self_meta, size, stride, storage_offset, out_meta); |
20054 | } |
20055 | |
20056 | at::Tensor self_; |
20057 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20058 | at::functionalization::impl::sync(self); |
20059 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20060 | } else { |
20061 | self_ = self; |
20062 | } |
20063 | |
20064 | at::Tensor out_; |
20065 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
20066 | at::functionalization::impl::sync(out); |
20067 | out_ = at::functionalization::impl::from_functional_tensor(out); |
20068 | } else { |
20069 | out_ = out; |
20070 | } |
      if (!at::functionalization::impl::isFunctionalTensor(out)) {
        if (at::functionalization::impl::isFunctionalTensor(self)) {
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
          // case 2: arguments are not functional tensors, so we no-op and redispatch.
          at::AutoDispatchSkipFunctionalize guard;
          at::_ops::as_strided_copy_out::call(self_, size, stride, storage_offset, out_);
          return out;
        }
20082 | } |
20083 | } else { |
20084 | at::Tensor tmp_output; |
20085 | { |
20086 | at::AutoDispatchSkipFunctionalize guard; |
20087 | tmp_output = at::_ops::as_strided_copy::call(self_, size, stride, storage_offset); |
20088 | } |
20089 | at::functionalization::impl::replace_(out, tmp_output); |
20090 | at::functionalization::impl::commit_update(out); |
20091 | at::functionalization::impl::sync(out); |
20092 | return out; |
20093 | } |
20094 | } |
20095 | |
20096 | at::Tensor & expand_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, bool implicit, at::Tensor & out) { |
20097 | if (false) { |
20098 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20099 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
20101 | auto self_meta = to_meta(self); |
20102 | auto out_meta = to_meta(out); |
20103 | at::AutoDispatchSkipFunctionalize func_guard; |
20104 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20105 | at::_ops::expand_copy_out::call(self_meta, size, implicit, out_meta); |
20106 | } |
20107 | |
20108 | at::Tensor self_; |
20109 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20110 | at::functionalization::impl::sync(self); |
20111 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20112 | } else { |
20113 | self_ = self; |
20114 | } |
20115 | |
20116 | at::Tensor out_; |
20117 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
20118 | at::functionalization::impl::sync(out); |
20119 | out_ = at::functionalization::impl::from_functional_tensor(out); |
20120 | } else { |
20121 | out_ = out; |
20122 | } |
      if (!at::functionalization::impl::isFunctionalTensor(out)) {
        if (at::functionalization::impl::isFunctionalTensor(self)) {
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
          // case 2: arguments are not functional tensors, so we no-op and redispatch.
          at::AutoDispatchSkipFunctionalize guard;
          at::_ops::expand_copy_out::call(self_, size, implicit, out_);
          return out;
        }
20134 | } |
20135 | } else { |
20136 | at::Tensor tmp_output; |
20137 | { |
20138 | at::AutoDispatchSkipFunctionalize guard; |
20139 | tmp_output = at::_ops::expand_copy::call(self_, size, implicit); |
20140 | } |
20141 | at::functionalization::impl::replace_(out, tmp_output); |
20142 | at::functionalization::impl::commit_update(out); |
20143 | at::functionalization::impl::sync(out); |
20144 | return out; |
20145 | } |
20146 | } |
20147 | |
20148 | at::Tensor & _reshape_alias_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) { |
20149 | if (false) { |
20150 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20151 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
20153 | auto self_meta = to_meta(self); |
20154 | auto out_meta = to_meta(out); |
20155 | at::AutoDispatchSkipFunctionalize func_guard; |
20156 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20157 | at::_ops::_reshape_alias_copy_out::call(self_meta, size, stride, out_meta); |
20158 | } |
20159 | |
20160 | at::Tensor self_; |
20161 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20162 | at::functionalization::impl::sync(self); |
20163 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20164 | } else { |
20165 | self_ = self; |
20166 | } |
20167 | |
20168 | at::Tensor out_; |
20169 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
20170 | at::functionalization::impl::sync(out); |
20171 | out_ = at::functionalization::impl::from_functional_tensor(out); |
20172 | } else { |
20173 | out_ = out; |
20174 | } |
      if (!at::functionalization::impl::isFunctionalTensor(out)) {
        if (at::functionalization::impl::isFunctionalTensor(self)) {
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
          // case 2: arguments are not functional tensors, so we no-op and redispatch.
          at::AutoDispatchSkipFunctionalize guard;
          at::_ops::_reshape_alias_copy_out::call(self_, size, stride, out_);
          return out;
        }
20186 | } |
20187 | } else { |
20188 | at::Tensor tmp_output; |
20189 | { |
20190 | at::AutoDispatchSkipFunctionalize guard; |
20191 | tmp_output = at::_ops::_reshape_alias_copy::call(self_, size, stride); |
20192 | } |
20193 | at::functionalization::impl::replace_(out, tmp_output); |
20194 | at::functionalization::impl::commit_update(out); |
20195 | at::functionalization::impl::sync(out); |
20196 | return out; |
20197 | } |
20198 | } |
20199 | |
20200 | at::Tensor & select_copy_out_int_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymInt index, at::Tensor & out) { |
20201 | if (false) { |
20202 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20203 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
20205 | auto self_meta = to_meta(self); |
20206 | auto out_meta = to_meta(out); |
20207 | at::AutoDispatchSkipFunctionalize func_guard; |
20208 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20209 | at::_ops::select_copy_int_out::call(self_meta, dim, index, out_meta); |
20210 | } |
20211 | |
20212 | at::Tensor self_; |
20213 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20214 | at::functionalization::impl::sync(self); |
20215 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20216 | } else { |
20217 | self_ = self; |
20218 | } |
20219 | |
20220 | at::Tensor out_; |
20221 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
20222 | at::functionalization::impl::sync(out); |
20223 | out_ = at::functionalization::impl::from_functional_tensor(out); |
20224 | } else { |
20225 | out_ = out; |
20226 | } |
      if (!at::functionalization::impl::isFunctionalTensor(out)) {
        if (at::functionalization::impl::isFunctionalTensor(self)) {
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
          // case 2: arguments are not functional tensors, so we no-op and redispatch.
          at::AutoDispatchSkipFunctionalize guard;
          at::_ops::select_copy_int_out::call(self_, dim, index, out_);
          return out;
        }
20238 | } |
20239 | } else { |
20240 | at::Tensor tmp_output; |
20241 | { |
20242 | at::AutoDispatchSkipFunctionalize guard; |
20243 | tmp_output = at::_ops::select_copy_int::call(self_, dim, index); |
20244 | } |
20245 | at::functionalization::impl::replace_(out, tmp_output); |
20246 | at::functionalization::impl::commit_update(out); |
20247 | at::functionalization::impl::sync(out); |
20248 | return out; |
20249 | } |
20250 | } |
20251 | |
20252 | void split_with_sizes_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) { |
20253 | if (false) { |
20254 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20255 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
20257 | auto self_meta = to_meta(self); |
20258 | auto out_meta = to_meta(out); |
20259 | at::AutoDispatchSkipFunctionalize func_guard; |
20260 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20261 | at::_ops::split_with_sizes_copy_out::call(self_meta, split_sizes, dim, out_meta); |
20262 | } |
20263 | |
20264 | at::Tensor self_; |
20265 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20266 | at::functionalization::impl::sync(self); |
20267 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20268 | } else { |
20269 | self_ = self; |
20270 | } |
20271 | |
20272 | ::std::vector<at::Tensor> out_; |
20273 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
20274 | at::functionalization::impl::sync(out); |
20275 | out_ = at::functionalization::impl::from_functional_tensor(out); |
20276 | } else { |
20277 | out_ = out.vec(); |
20278 | } |
      if (!at::functionalization::impl::isFunctionalTensor(out)) {
        if (at::functionalization::impl::isFunctionalTensor(self)) {
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
          // case 2: arguments are not functional tensors, so we no-op and redispatch.
          at::AutoDispatchSkipFunctionalize guard;
          at::_ops::split_with_sizes_copy_out::call(self_, split_sizes, dim, out_);
20290 | } |
20291 | } else { |
20292 | ::std::vector<at::Tensor> tmp_output; |
20293 | { |
20294 | at::AutoDispatchSkipFunctionalize guard; |
20295 | tmp_output = at::_ops::split_with_sizes_copy::call(self_, split_sizes, dim); |
20296 | } |
20297 | at::functionalization::impl::replace_(out, tmp_output); |
20298 | at::functionalization::impl::commit_update(out); |
20299 | at::functionalization::impl::sync(out); |
20300 | |
20301 | } |
20302 | } |
20303 | |
20304 | at::Tensor & squeeze_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
20305 | if (false) { |
20306 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20307 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
20309 | auto self_meta = to_meta(self); |
20310 | auto out_meta = to_meta(out); |
20311 | at::AutoDispatchSkipFunctionalize func_guard; |
20312 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20313 | at::_ops::squeeze_copy_out::call(self_meta, out_meta); |
20314 | } |
20315 | |
20316 | at::Tensor self_; |
20317 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20318 | at::functionalization::impl::sync(self); |
20319 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20320 | } else { |
20321 | self_ = self; |
20322 | } |
20323 | |
20324 | at::Tensor out_; |
20325 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
20326 | at::functionalization::impl::sync(out); |
20327 | out_ = at::functionalization::impl::from_functional_tensor(out); |
20328 | } else { |
20329 | out_ = out; |
20330 | } |
      if (!at::functionalization::impl::isFunctionalTensor(out)) {
        if (at::functionalization::impl::isFunctionalTensor(self)) {
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
          // case 2: arguments are not functional tensors, so we no-op and redispatch.
          at::AutoDispatchSkipFunctionalize guard;
          at::_ops::squeeze_copy_out::call(self_, out_);
          return out;
        }
20342 | } |
20343 | } else { |
20344 | at::Tensor tmp_output; |
20345 | { |
20346 | at::AutoDispatchSkipFunctionalize guard; |
20347 | tmp_output = at::_ops::squeeze_copy::call(self_); |
20348 | } |
20349 | at::functionalization::impl::replace_(out, tmp_output); |
20350 | at::functionalization::impl::commit_update(out); |
20351 | at::functionalization::impl::sync(out); |
20352 | return out; |
20353 | } |
20354 | } |
20355 | |
20356 | at::Tensor & squeeze_copy_out_dim_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out) { |
20357 | if (false) { |
20358 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20359 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
20361 | auto self_meta = to_meta(self); |
20362 | auto out_meta = to_meta(out); |
20363 | at::AutoDispatchSkipFunctionalize func_guard; |
20364 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20365 | at::_ops::squeeze_copy_dim_out::call(self_meta, dim, out_meta); |
20366 | } |
20367 | |
20368 | at::Tensor self_; |
20369 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20370 | at::functionalization::impl::sync(self); |
20371 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20372 | } else { |
20373 | self_ = self; |
20374 | } |
20375 | |
20376 | at::Tensor out_; |
20377 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
20378 | at::functionalization::impl::sync(out); |
20379 | out_ = at::functionalization::impl::from_functional_tensor(out); |
20380 | } else { |
20381 | out_ = out; |
20382 | } |
      if (!at::functionalization::impl::isFunctionalTensor(out)) {
        if (at::functionalization::impl::isFunctionalTensor(self)) {
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
          // case 2: arguments are not functional tensors, so we no-op and redispatch.
          at::AutoDispatchSkipFunctionalize guard;
          at::_ops::squeeze_copy_dim_out::call(self_, dim, out_);
          return out;
        }
20394 | } |
20395 | } else { |
20396 | at::Tensor tmp_output; |
20397 | { |
20398 | at::AutoDispatchSkipFunctionalize guard; |
20399 | tmp_output = at::_ops::squeeze_copy_dim::call(self_, dim); |
20400 | } |
20401 | at::functionalization::impl::replace_(out, tmp_output); |
20402 | at::functionalization::impl::commit_update(out); |
20403 | at::functionalization::impl::sync(out); |
20404 | return out; |
20405 | } |
20406 | } |
20407 | |
20408 | at::Tensor & squeeze_copy_out_dims_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) { |
20409 | if (false) { |
20410 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20411 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
20413 | auto self_meta = to_meta(self); |
20414 | auto out_meta = to_meta(out); |
20415 | at::AutoDispatchSkipFunctionalize func_guard; |
20416 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20417 | at::_ops::squeeze_copy_dims_out::call(self_meta, dim, out_meta); |
20418 | } |
20419 | |
20420 | at::Tensor self_; |
20421 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20422 | at::functionalization::impl::sync(self); |
20423 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20424 | } else { |
20425 | self_ = self; |
20426 | } |
20427 | |
20428 | at::Tensor out_; |
20429 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
20430 | at::functionalization::impl::sync(out); |
20431 | out_ = at::functionalization::impl::from_functional_tensor(out); |
20432 | } else { |
20433 | out_ = out; |
20434 | } |
      if (!at::functionalization::impl::isFunctionalTensor(out)) {
        if (at::functionalization::impl::isFunctionalTensor(self)) {
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
          // case 2: arguments are not functional tensors, so we no-op and redispatch.
          at::AutoDispatchSkipFunctionalize guard;
          at::_ops::squeeze_copy_dims_out::call(self_, dim, out_);
          return out;
        }
20446 | } |
20447 | } else { |
20448 | at::Tensor tmp_output; |
20449 | { |
20450 | at::AutoDispatchSkipFunctionalize guard; |
20451 | tmp_output = at::_ops::squeeze_copy_dims::call(self_, dim); |
20452 | } |
20453 | at::functionalization::impl::replace_(out, tmp_output); |
20454 | at::functionalization::impl::commit_update(out); |
20455 | at::functionalization::impl::sync(out); |
20456 | return out; |
20457 | } |
20458 | } |
20459 | |
20460 | at::Tensor & _indices_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
20461 | if (false) { |
20462 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20463 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
20465 | auto self_meta = to_meta(self); |
20466 | auto out_meta = to_meta(out); |
20467 | at::AutoDispatchSkipFunctionalize func_guard; |
20468 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20469 | at::_ops::_indices_copy_out::call(self_meta, out_meta); |
20470 | } |
20471 | |
20472 | at::Tensor self_; |
20473 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20474 | at::functionalization::impl::sync(self); |
20475 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20476 | } else { |
20477 | self_ = self; |
20478 | } |
20479 | |
20480 | at::Tensor out_; |
20481 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
20482 | at::functionalization::impl::sync(out); |
20483 | out_ = at::functionalization::impl::from_functional_tensor(out); |
20484 | } else { |
20485 | out_ = out; |
20486 | } |
      if (!at::functionalization::impl::isFunctionalTensor(out)) {
        if (at::functionalization::impl::isFunctionalTensor(self)) {
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
          // case 2: arguments are not functional tensors, so we no-op and redispatch.
          at::AutoDispatchSkipFunctionalize guard;
          at::_ops::_indices_copy_out::call(self_, out_);
          return out;
        }
20498 | } |
20499 | } else { |
20500 | at::Tensor tmp_output; |
20501 | { |
20502 | at::AutoDispatchSkipFunctionalize guard; |
20503 | tmp_output = at::_ops::_indices_copy::call(self_); |
20504 | } |
20505 | at::functionalization::impl::replace_(out, tmp_output); |
20506 | at::functionalization::impl::commit_update(out); |
20507 | at::functionalization::impl::sync(out); |
20508 | return out; |
20509 | } |
20510 | } |
20511 | |
20512 | at::Tensor & _values_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
20513 | if (false) { |
20514 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20515 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
20517 | auto self_meta = to_meta(self); |
20518 | auto out_meta = to_meta(out); |
20519 | at::AutoDispatchSkipFunctionalize func_guard; |
20520 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20521 | at::_ops::_values_copy_out::call(self_meta, out_meta); |
20522 | } |
20523 | |
20524 | at::Tensor self_; |
20525 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20526 | at::functionalization::impl::sync(self); |
20527 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20528 | } else { |
20529 | self_ = self; |
20530 | } |
20531 | |
20532 | at::Tensor out_; |
20533 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
20534 | at::functionalization::impl::sync(out); |
20535 | out_ = at::functionalization::impl::from_functional_tensor(out); |
20536 | } else { |
20537 | out_ = out; |
20538 | } |
      if (!at::functionalization::impl::isFunctionalTensor(out)) {
        if (at::functionalization::impl::isFunctionalTensor(self)) {
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
          // case 2: arguments are not functional tensors, so we no-op and redispatch.
          at::AutoDispatchSkipFunctionalize guard;
          at::_ops::_values_copy_out::call(self_, out_);
          return out;
        }
20550 | } |
20551 | } else { |
20552 | at::Tensor tmp_output; |
20553 | { |
20554 | at::AutoDispatchSkipFunctionalize guard; |
20555 | tmp_output = at::_ops::_values_copy::call(self_); |
20556 | } |
20557 | at::functionalization::impl::replace_(out, tmp_output); |
20558 | at::functionalization::impl::commit_update(out); |
20559 | at::functionalization::impl::sync(out); |
20560 | return out; |
20561 | } |
20562 | } |
20563 | |
20564 | at::Tensor & crow_indices_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
20565 | if (false) { |
20566 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20567 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
20569 | auto self_meta = to_meta(self); |
20570 | auto out_meta = to_meta(out); |
20571 | at::AutoDispatchSkipFunctionalize func_guard; |
20572 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20573 | at::_ops::crow_indices_copy_out::call(self_meta, out_meta); |
20574 | } |
20575 | |
20576 | at::Tensor self_; |
20577 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20578 | at::functionalization::impl::sync(self); |
20579 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20580 | } else { |
20581 | self_ = self; |
20582 | } |
20583 | |
20584 | at::Tensor out_; |
20585 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
20586 | at::functionalization::impl::sync(out); |
20587 | out_ = at::functionalization::impl::from_functional_tensor(out); |
20588 | } else { |
20589 | out_ = out; |
20590 | } |
      if (!at::functionalization::impl::isFunctionalTensor(out)) {
        if (at::functionalization::impl::isFunctionalTensor(self)) {
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
          // case 2: arguments are not functional tensors, so we no-op and redispatch.
          at::AutoDispatchSkipFunctionalize guard;
          at::_ops::crow_indices_copy_out::call(self_, out_);
          return out;
        }
20602 | } |
20603 | } else { |
20604 | at::Tensor tmp_output; |
20605 | { |
20606 | at::AutoDispatchSkipFunctionalize guard; |
20607 | tmp_output = at::_ops::crow_indices_copy::call(self_); |
20608 | } |
20609 | at::functionalization::impl::replace_(out, tmp_output); |
20610 | at::functionalization::impl::commit_update(out); |
20611 | at::functionalization::impl::sync(out); |
20612 | return out; |
20613 | } |
20614 | } |
20615 | |
20616 | at::Tensor & col_indices_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
20617 | if (false) { |
20618 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20619 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
20621 | auto self_meta = to_meta(self); |
20622 | auto out_meta = to_meta(out); |
20623 | at::AutoDispatchSkipFunctionalize func_guard; |
20624 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20625 | at::_ops::col_indices_copy_out::call(self_meta, out_meta); |
20626 | } |
20627 | |
20628 | at::Tensor self_; |
20629 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20630 | at::functionalization::impl::sync(self); |
20631 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20632 | } else { |
20633 | self_ = self; |
20634 | } |
20635 | |
20636 | at::Tensor out_; |
20637 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
20638 | at::functionalization::impl::sync(out); |
20639 | out_ = at::functionalization::impl::from_functional_tensor(out); |
20640 | } else { |
20641 | out_ = out; |
20642 | } |
      if (!at::functionalization::impl::isFunctionalTensor(out)) {
        if (at::functionalization::impl::isFunctionalTensor(self)) {
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
          // case 2: arguments are not functional tensors, so we no-op and redispatch.
          at::AutoDispatchSkipFunctionalize guard;
          at::_ops::col_indices_copy_out::call(self_, out_);
          return out;
        }
20654 | } |
20655 | } else { |
20656 | at::Tensor tmp_output; |
20657 | { |
20658 | at::AutoDispatchSkipFunctionalize guard; |
20659 | tmp_output = at::_ops::col_indices_copy::call(self_); |
20660 | } |
20661 | at::functionalization::impl::replace_(out, tmp_output); |
20662 | at::functionalization::impl::commit_update(out); |
20663 | at::functionalization::impl::sync(out); |
20664 | return out; |
20665 | } |
20666 | } |
20667 | |
20668 | void unbind_copy_out_int_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::TensorList out) { |
20669 | if (false) { |
20670 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20671 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
20673 | auto self_meta = to_meta(self); |
20674 | auto out_meta = to_meta(out); |
20675 | at::AutoDispatchSkipFunctionalize func_guard; |
20676 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20677 | at::_ops::unbind_copy_int_out::call(self_meta, dim, out_meta); |
20678 | } |
20679 | |
20680 | at::Tensor self_; |
20681 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20682 | at::functionalization::impl::sync(self); |
20683 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20684 | } else { |
20685 | self_ = self; |
20686 | } |
20687 | |
20688 | ::std::vector<at::Tensor> out_; |
20689 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
20690 | at::functionalization::impl::sync(out); |
20691 | out_ = at::functionalization::impl::from_functional_tensor(out); |
20692 | } else { |
20693 | out_ = out.vec(); |
20694 | } |
      if (!at::functionalization::impl::isFunctionalTensor(out)) {
        if (at::functionalization::impl::isFunctionalTensor(self)) {
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
          // case 2: arguments are not functional tensors, so we no-op and redispatch.
          at::AutoDispatchSkipFunctionalize guard;
          at::_ops::unbind_copy_int_out::call(self_, dim, out_);
20706 | } |
20707 | } else { |
20708 | ::std::vector<at::Tensor> tmp_output; |
20709 | { |
20710 | at::AutoDispatchSkipFunctionalize guard; |
20711 | tmp_output = at::_ops::unbind_copy_int::call(self_, dim); |
20712 | } |
20713 | at::functionalization::impl::replace_(out, tmp_output); |
20714 | at::functionalization::impl::commit_update(out); |
20715 | at::functionalization::impl::sync(out); |
20716 | |
20717 | } |
20718 | } |
20719 | |
20720 | at::Tensor & view_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) { |
20721 | if (false) { |
20722 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20723 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
20725 | auto self_meta = to_meta(self); |
20726 | auto out_meta = to_meta(out); |
20727 | at::AutoDispatchSkipFunctionalize func_guard; |
20728 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20729 | at::_ops::view_copy_out::call(self_meta, size, out_meta); |
20730 | } |
20731 | |
20732 | at::Tensor self_; |
20733 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20734 | at::functionalization::impl::sync(self); |
20735 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20736 | } else { |
20737 | self_ = self; |
20738 | } |
20739 | |
20740 | at::Tensor out_; |
20741 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
20742 | at::functionalization::impl::sync(out); |
20743 | out_ = at::functionalization::impl::from_functional_tensor(out); |
20744 | } else { |
20745 | out_ = out; |
20746 | } |
      if (!at::functionalization::impl::isFunctionalTensor(out)) {
        if (at::functionalization::impl::isFunctionalTensor(self)) {
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
          // case 2: arguments are not functional tensors, so we no-op and redispatch.
          at::AutoDispatchSkipFunctionalize guard;
          at::_ops::view_copy_out::call(self_, size, out_);
          return out;
        }
20758 | } |
20759 | } else { |
20760 | at::Tensor tmp_output; |
20761 | { |
20762 | at::AutoDispatchSkipFunctionalize guard; |
20763 | tmp_output = at::_ops::view_copy::call(self_, size); |
20764 | } |
20765 | at::functionalization::impl::replace_(out, tmp_output); |
20766 | at::functionalization::impl::commit_update(out); |
20767 | at::functionalization::impl::sync(out); |
20768 | return out; |
20769 | } |
20770 | } |
20771 | |
20772 | at::Tensor & view_copy_out_dtype_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype, at::Tensor & out) { |
20773 | if (false) { |
20774 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20775 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
20777 | auto self_meta = to_meta(self); |
20778 | auto out_meta = to_meta(out); |
20779 | at::AutoDispatchSkipFunctionalize func_guard; |
20780 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20781 | at::_ops::view_copy_dtype_out::call(self_meta, dtype, out_meta); |
20782 | } |
20783 | |
20784 | at::Tensor self_; |
20785 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20786 | at::functionalization::impl::sync(self); |
20787 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20788 | } else { |
20789 | self_ = self; |
20790 | } |
20791 | |
20792 | at::Tensor out_; |
20793 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
20794 | at::functionalization::impl::sync(out); |
20795 | out_ = at::functionalization::impl::from_functional_tensor(out); |
20796 | } else { |
20797 | out_ = out; |
20798 | } |
      if (!at::functionalization::impl::isFunctionalTensor(out)) {
        if (at::functionalization::impl::isFunctionalTensor(self)) {
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
          // case 2: arguments are not functional tensors, so we no-op and redispatch.
          at::AutoDispatchSkipFunctionalize guard;
          at::_ops::view_copy_dtype_out::call(self_, dtype, out_);
          return out;
        }
20810 | } |
20811 | } else { |
20812 | at::Tensor tmp_output; |
20813 | { |
20814 | at::AutoDispatchSkipFunctionalize guard; |
20815 | tmp_output = at::_ops::view_copy_dtype::call(self_, dtype); |
20816 | } |
20817 | at::functionalization::impl::replace_(out, tmp_output); |
20818 | at::functionalization::impl::commit_update(out); |
20819 | at::functionalization::impl::sync(out); |
20820 | return out; |
20821 | } |
20822 | } |
20823 | |
20824 | at::Tensor & alias_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
20825 | if (false) { |
20826 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20827 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
20829 | auto self_meta = to_meta(self); |
20830 | auto out_meta = to_meta(out); |
20831 | at::AutoDispatchSkipFunctionalize func_guard; |
20832 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20833 | at::_ops::alias_copy_out::call(self_meta, out_meta); |
20834 | } |
20835 | |
20836 | at::Tensor self_; |
20837 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20838 | at::functionalization::impl::sync(self); |
20839 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20840 | } else { |
20841 | self_ = self; |
20842 | } |
20843 | |
20844 | at::Tensor out_; |
20845 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
20846 | at::functionalization::impl::sync(out); |
20847 | out_ = at::functionalization::impl::from_functional_tensor(out); |
20848 | } else { |
20849 | out_ = out; |
20850 | } |
      if (!at::functionalization::impl::isFunctionalTensor(out)) {
        if (at::functionalization::impl::isFunctionalTensor(self)) {
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
          // case 2: arguments are not functional tensors, so we no-op and redispatch.
          at::AutoDispatchSkipFunctionalize guard;
          at::_ops::alias_copy_out::call(self_, out_);
          return out;
        }
20862 | } |
20863 | } else { |
20864 | at::Tensor tmp_output; |
20865 | { |
20866 | at::AutoDispatchSkipFunctionalize guard; |
20867 | tmp_output = at::_ops::alias_copy::call(self_); |
20868 | } |
20869 | at::functionalization::impl::replace_(out, tmp_output); |
20870 | at::functionalization::impl::commit_update(out); |
20871 | at::functionalization::impl::sync(out); |
20872 | return out; |
20873 | } |
20874 | } |
20875 | |
20876 | at::Tensor & special_airy_ai_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, at::Tensor & out) { |
20877 | if (false) { |
20878 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20879 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
20881 | auto x_meta = to_meta(x); |
20882 | auto out_meta = to_meta(out); |
20883 | at::AutoDispatchSkipFunctionalize func_guard; |
20884 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20885 | at::_ops::special_airy_ai_out::call(x_meta, out_meta); |
20886 | } |
20887 | |
20888 | at::Tensor x_; |
20889 | if (at::functionalization::impl::isFunctionalTensor(x)) { |
20890 | at::functionalization::impl::sync(x); |
20891 | x_ = at::functionalization::impl::from_functional_tensor(x); |
20892 | } else { |
20893 | x_ = x; |
20894 | } |
20895 | |
20896 | at::Tensor out_; |
20897 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
20898 | at::functionalization::impl::sync(out); |
20899 | out_ = at::functionalization::impl::from_functional_tensor(out); |
20900 | } else { |
20901 | out_ = out; |
20902 | } |
      if (!at::functionalization::impl::isFunctionalTensor(out)) {
        if (at::functionalization::impl::isFunctionalTensor(x)) {
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
          // case 2: arguments are not functional tensors, so we no-op and redispatch.
          at::AutoDispatchSkipFunctionalize guard;
          at::_ops::special_airy_ai_out::call(x_, out_);
          return out;
        }
20914 | } |
20915 | } else { |
20916 | at::Tensor tmp_output; |
20917 | { |
20918 | at::AutoDispatchSkipFunctionalize guard; |
20919 | tmp_output = at::_ops::special_airy_ai::call(x_); |
20920 | } |
20921 | at::functionalization::impl::replace_(out, tmp_output); |
20922 | at::functionalization::impl::commit_update(out); |
20923 | at::functionalization::impl::sync(out); |
20924 | return out; |
20925 | } |
20926 | } |
20927 | |
20928 | at::Tensor & special_bessel_j0_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
20929 | if (false) { |
20930 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20931 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
20933 | auto self_meta = to_meta(self); |
20934 | auto out_meta = to_meta(out); |
20935 | at::AutoDispatchSkipFunctionalize func_guard; |
20936 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20937 | at::_ops::special_bessel_j0_out::call(self_meta, out_meta); |
20938 | } |
20939 | |
20940 | at::Tensor self_; |
20941 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20942 | at::functionalization::impl::sync(self); |
20943 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20944 | } else { |
20945 | self_ = self; |
20946 | } |
20947 | |
20948 | at::Tensor out_; |
20949 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
20950 | at::functionalization::impl::sync(out); |
20951 | out_ = at::functionalization::impl::from_functional_tensor(out); |
20952 | } else { |
20953 | out_ = out; |
20954 | } |
      if (!at::functionalization::impl::isFunctionalTensor(out)) {
        if (at::functionalization::impl::isFunctionalTensor(self)) {
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
          // case 2: arguments are not functional tensors, so we no-op and redispatch.
          at::AutoDispatchSkipFunctionalize guard;
          at::_ops::special_bessel_j0_out::call(self_, out_);
          return out;
        }
20966 | } |
20967 | } else { |
20968 | at::Tensor tmp_output; |
20969 | { |
20970 | at::AutoDispatchSkipFunctionalize guard; |
20971 | tmp_output = at::_ops::special_bessel_j0::call(self_); |
20972 | } |
20973 | at::functionalization::impl::replace_(out, tmp_output); |
20974 | at::functionalization::impl::commit_update(out); |
20975 | at::functionalization::impl::sync(out); |
20976 | return out; |
20977 | } |
20978 | } |
20979 | |
20980 | at::Tensor & special_chebyshev_polynomial_v_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) { |
20981 | if (false) { |
20982 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20983 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
20985 | auto x_meta = to_meta(x); |
20986 | auto n_meta = to_meta(n); |
20987 | auto out_meta = to_meta(out); |
20988 | at::AutoDispatchSkipFunctionalize func_guard; |
20989 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20990 | at::_ops::special_chebyshev_polynomial_v_out::call(x_meta, n_meta, out_meta); |
20991 | } |
20992 | |
20993 | at::Tensor x_; |
20994 | if (at::functionalization::impl::isFunctionalTensor(x)) { |
20995 | at::functionalization::impl::sync(x); |
20996 | x_ = at::functionalization::impl::from_functional_tensor(x); |
20997 | } else { |
20998 | x_ = x; |
20999 | } |
21000 | |
21001 | at::Tensor n_; |
21002 | if (at::functionalization::impl::isFunctionalTensor(n)) { |
21003 | at::functionalization::impl::sync(n); |
21004 | n_ = at::functionalization::impl::from_functional_tensor(n); |
21005 | } else { |
21006 | n_ = n; |
21007 | } |
21008 | |
21009 | at::Tensor out_; |
21010 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21011 | at::functionalization::impl::sync(out); |
21012 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21013 | } else { |
21014 | out_ = out; |
21015 | } |
      if (!at::functionalization::impl::isFunctionalTensor(out)) {
        if (at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(n)) {
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
          // case 2: arguments are not functional tensors, so we no-op and redispatch.
          at::AutoDispatchSkipFunctionalize guard;
          at::_ops::special_chebyshev_polynomial_v_out::call(x_, n_, out_);
          return out;
        }
21027 | } |
21028 | } else { |
21029 | at::Tensor tmp_output; |
21030 | { |
21031 | at::AutoDispatchSkipFunctionalize guard; |
21032 | tmp_output = at::_ops::special_chebyshev_polynomial_v::call(x_, n_); |
21033 | } |
21034 | at::functionalization::impl::replace_(out, tmp_output); |
21035 | at::functionalization::impl::commit_update(out); |
21036 | at::functionalization::impl::sync(out); |
21037 | return out; |
21038 | } |
21039 | } |
21040 | |
21041 | at::Tensor & special_chebyshev_polynomial_v_out_x_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { |
21042 | if (false) { |
21043 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21044 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
21046 | auto n_meta = to_meta(n); |
21047 | auto out_meta = to_meta(out); |
21048 | at::AutoDispatchSkipFunctionalize func_guard; |
21049 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21050 | at::_ops::special_chebyshev_polynomial_v_x_scalar_out::call(x, n_meta, out_meta); |
21051 | } |
21052 | |
21053 | at::Tensor n_; |
21054 | if (at::functionalization::impl::isFunctionalTensor(n)) { |
21055 | at::functionalization::impl::sync(n); |
21056 | n_ = at::functionalization::impl::from_functional_tensor(n); |
21057 | } else { |
21058 | n_ = n; |
21059 | } |
21060 | |
21061 | at::Tensor out_; |
21062 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21063 | at::functionalization::impl::sync(out); |
21064 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21065 | } else { |
21066 | out_ = out; |
21067 | } |
      if (!at::functionalization::impl::isFunctionalTensor(out)) {
        if (at::functionalization::impl::isFunctionalTensor(n)) {
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
          // case 2: arguments are not functional tensors, so we no-op and redispatch.
          at::AutoDispatchSkipFunctionalize guard;
          at::_ops::special_chebyshev_polynomial_v_x_scalar_out::call(x, n_, out_);
          return out;
        }
21079 | } |
21080 | } else { |
21081 | at::Tensor tmp_output; |
21082 | { |
21083 | at::AutoDispatchSkipFunctionalize guard; |
21084 | tmp_output = at::_ops::special_chebyshev_polynomial_v_x_scalar::call(x, n_); |
21085 | } |
21086 | at::functionalization::impl::replace_(out, tmp_output); |
21087 | at::functionalization::impl::commit_update(out); |
21088 | at::functionalization::impl::sync(out); |
21089 | return out; |
21090 | } |
21091 | } |
21092 | |
21093 | at::Tensor & special_chebyshev_polynomial_v_out_n_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { |
21094 | if (false) { |
21095 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21096 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
21098 | auto x_meta = to_meta(x); |
21099 | auto out_meta = to_meta(out); |
21100 | at::AutoDispatchSkipFunctionalize func_guard; |
21101 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21102 | at::_ops::special_chebyshev_polynomial_v_n_scalar_out::call(x_meta, n, out_meta); |
21103 | } |
21104 | |
21105 | at::Tensor x_; |
21106 | if (at::functionalization::impl::isFunctionalTensor(x)) { |
21107 | at::functionalization::impl::sync(x); |
21108 | x_ = at::functionalization::impl::from_functional_tensor(x); |
21109 | } else { |
21110 | x_ = x; |
21111 | } |
21112 | |
21113 | at::Tensor out_; |
21114 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21115 | at::functionalization::impl::sync(out); |
21116 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21117 | } else { |
21118 | out_ = out; |
21119 | } |
      if (!at::functionalization::impl::isFunctionalTensor(out)) {
        if (at::functionalization::impl::isFunctionalTensor(x)) {
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
          TORCH_INTERNAL_ASSERT(false,
            "mutating a non-functional tensor with a functional tensor is not allowed.",
            " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
          // case 2: arguments are not functional tensors, so we no-op and redispatch.
          at::AutoDispatchSkipFunctionalize guard;
          at::_ops::special_chebyshev_polynomial_v_n_scalar_out::call(x_, n, out_);
          return out;
        }
21131 | } |
21132 | } else { |
21133 | at::Tensor tmp_output; |
21134 | { |
21135 | at::AutoDispatchSkipFunctionalize guard; |
21136 | tmp_output = at::_ops::special_chebyshev_polynomial_v_n_scalar::call(x_, n); |
21137 | } |
21138 | at::functionalization::impl::replace_(out, tmp_output); |
21139 | at::functionalization::impl::commit_update(out); |
21140 | at::functionalization::impl::sync(out); |
21141 | return out; |
21142 | } |
21143 | } |
21144 | |
21145 | at::Tensor & special_chebyshev_polynomial_w_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) { |
21146 | if (false) { |
21147 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21148 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
21150 | auto x_meta = to_meta(x); |
21151 | auto n_meta = to_meta(n); |
21152 | auto out_meta = to_meta(out); |
21153 | at::AutoDispatchSkipFunctionalize func_guard; |
21154 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21155 | at::_ops::special_chebyshev_polynomial_w_out::call(x_meta, n_meta, out_meta); |
21156 | } |
21157 | |
21158 | at::Tensor x_; |
21159 | if (at::functionalization::impl::isFunctionalTensor(x)) { |
21160 | at::functionalization::impl::sync(x); |
21161 | x_ = at::functionalization::impl::from_functional_tensor(x); |
21162 | } else { |
21163 | x_ = x; |
21164 | } |
21165 | |
21166 | at::Tensor n_; |
21167 | if (at::functionalization::impl::isFunctionalTensor(n)) { |
21168 | at::functionalization::impl::sync(n); |
21169 | n_ = at::functionalization::impl::from_functional_tensor(n); |
21170 | } else { |
21171 | n_ = n; |
21172 | } |
21173 | |
21174 | at::Tensor out_; |
21175 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21176 | at::functionalization::impl::sync(out); |
21177 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21178 | } else { |
21179 | out_ = out; |
21180 | } |
21181 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
21182 | if ((false || at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(n))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
21187 | } else { |
21188 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
21189 | at::AutoDispatchSkipFunctionalize guard; |
21190 | at::Tensor tmp_output = at::_ops::special_chebyshev_polynomial_w_out::call(x_, n_, out_); |
      return out;
21192 | } |
21193 | } else { |
21194 | at::Tensor tmp_output; |
21195 | { |
21196 | at::AutoDispatchSkipFunctionalize guard; |
21197 | tmp_output = at::_ops::special_chebyshev_polynomial_w::call(x_, n_); |
21198 | } |
21199 | at::functionalization::impl::replace_(out, tmp_output); |
21200 | at::functionalization::impl::commit_update(out); |
21201 | at::functionalization::impl::sync(out); |
21202 | return out; |
21203 | } |
21204 | } |
21205 | |
21206 | at::Tensor & special_chebyshev_polynomial_w_out_x_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { |
21207 | if (false) { |
21208 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21209 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
21211 | auto n_meta = to_meta(n); |
21212 | auto out_meta = to_meta(out); |
21213 | at::AutoDispatchSkipFunctionalize func_guard; |
21214 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21215 | at::_ops::special_chebyshev_polynomial_w_x_scalar_out::call(x, n_meta, out_meta); |
21216 | } |
21217 | |
21218 | at::Tensor n_; |
21219 | if (at::functionalization::impl::isFunctionalTensor(n)) { |
21220 | at::functionalization::impl::sync(n); |
21221 | n_ = at::functionalization::impl::from_functional_tensor(n); |
21222 | } else { |
21223 | n_ = n; |
21224 | } |
21225 | |
21226 | at::Tensor out_; |
21227 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21228 | at::functionalization::impl::sync(out); |
21229 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21230 | } else { |
21231 | out_ = out; |
21232 | } |
21233 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
21234 | if ((false || at::functionalization::impl::isFunctionalTensor(n))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
21239 | } else { |
21240 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
21241 | at::AutoDispatchSkipFunctionalize guard; |
21242 | at::Tensor tmp_output = at::_ops::special_chebyshev_polynomial_w_x_scalar_out::call(x, n_, out_); |
      return out;
21244 | } |
21245 | } else { |
21246 | at::Tensor tmp_output; |
21247 | { |
21248 | at::AutoDispatchSkipFunctionalize guard; |
21249 | tmp_output = at::_ops::special_chebyshev_polynomial_w_x_scalar::call(x, n_); |
21250 | } |
21251 | at::functionalization::impl::replace_(out, tmp_output); |
21252 | at::functionalization::impl::commit_update(out); |
21253 | at::functionalization::impl::sync(out); |
21254 | return out; |
21255 | } |
21256 | } |
21257 | |
21258 | at::Tensor & special_chebyshev_polynomial_w_out_n_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { |
21259 | if (false) { |
21260 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21261 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
21263 | auto x_meta = to_meta(x); |
21264 | auto out_meta = to_meta(out); |
21265 | at::AutoDispatchSkipFunctionalize func_guard; |
21266 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21267 | at::_ops::special_chebyshev_polynomial_w_n_scalar_out::call(x_meta, n, out_meta); |
21268 | } |
21269 | |
21270 | at::Tensor x_; |
21271 | if (at::functionalization::impl::isFunctionalTensor(x)) { |
21272 | at::functionalization::impl::sync(x); |
21273 | x_ = at::functionalization::impl::from_functional_tensor(x); |
21274 | } else { |
21275 | x_ = x; |
21276 | } |
21277 | |
21278 | at::Tensor out_; |
21279 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21280 | at::functionalization::impl::sync(out); |
21281 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21282 | } else { |
21283 | out_ = out; |
21284 | } |
21285 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
21286 | if ((false || at::functionalization::impl::isFunctionalTensor(x))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
21291 | } else { |
21292 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
21293 | at::AutoDispatchSkipFunctionalize guard; |
21294 | at::Tensor tmp_output = at::_ops::special_chebyshev_polynomial_w_n_scalar_out::call(x_, n, out_); |
      return out;
21296 | } |
21297 | } else { |
21298 | at::Tensor tmp_output; |
21299 | { |
21300 | at::AutoDispatchSkipFunctionalize guard; |
21301 | tmp_output = at::_ops::special_chebyshev_polynomial_w_n_scalar::call(x_, n); |
21302 | } |
21303 | at::functionalization::impl::replace_(out, tmp_output); |
21304 | at::functionalization::impl::commit_update(out); |
21305 | at::functionalization::impl::sync(out); |
21306 | return out; |
21307 | } |
21308 | } |
21309 | |
21310 | at::Tensor & special_hermite_polynomial_he_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) { |
21311 | if (false) { |
21312 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21313 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
21315 | auto x_meta = to_meta(x); |
21316 | auto n_meta = to_meta(n); |
21317 | auto out_meta = to_meta(out); |
21318 | at::AutoDispatchSkipFunctionalize func_guard; |
21319 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21320 | at::_ops::special_hermite_polynomial_he_out::call(x_meta, n_meta, out_meta); |
21321 | } |
21322 | |
21323 | at::Tensor x_; |
21324 | if (at::functionalization::impl::isFunctionalTensor(x)) { |
21325 | at::functionalization::impl::sync(x); |
21326 | x_ = at::functionalization::impl::from_functional_tensor(x); |
21327 | } else { |
21328 | x_ = x; |
21329 | } |
21330 | |
21331 | at::Tensor n_; |
21332 | if (at::functionalization::impl::isFunctionalTensor(n)) { |
21333 | at::functionalization::impl::sync(n); |
21334 | n_ = at::functionalization::impl::from_functional_tensor(n); |
21335 | } else { |
21336 | n_ = n; |
21337 | } |
21338 | |
21339 | at::Tensor out_; |
21340 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21341 | at::functionalization::impl::sync(out); |
21342 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21343 | } else { |
21344 | out_ = out; |
21345 | } |
21346 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
21347 | if ((false || at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(n))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
21352 | } else { |
21353 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
21354 | at::AutoDispatchSkipFunctionalize guard; |
21355 | at::Tensor tmp_output = at::_ops::special_hermite_polynomial_he_out::call(x_, n_, out_); |
      return out;
21357 | } |
21358 | } else { |
21359 | at::Tensor tmp_output; |
21360 | { |
21361 | at::AutoDispatchSkipFunctionalize guard; |
21362 | tmp_output = at::_ops::special_hermite_polynomial_he::call(x_, n_); |
21363 | } |
21364 | at::functionalization::impl::replace_(out, tmp_output); |
21365 | at::functionalization::impl::commit_update(out); |
21366 | at::functionalization::impl::sync(out); |
21367 | return out; |
21368 | } |
21369 | } |
21370 | |
21371 | at::Tensor & special_hermite_polynomial_he_out_x_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { |
21372 | if (false) { |
21373 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21374 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
21376 | auto n_meta = to_meta(n); |
21377 | auto out_meta = to_meta(out); |
21378 | at::AutoDispatchSkipFunctionalize func_guard; |
21379 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21380 | at::_ops::special_hermite_polynomial_he_x_scalar_out::call(x, n_meta, out_meta); |
21381 | } |
21382 | |
21383 | at::Tensor n_; |
21384 | if (at::functionalization::impl::isFunctionalTensor(n)) { |
21385 | at::functionalization::impl::sync(n); |
21386 | n_ = at::functionalization::impl::from_functional_tensor(n); |
21387 | } else { |
21388 | n_ = n; |
21389 | } |
21390 | |
21391 | at::Tensor out_; |
21392 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21393 | at::functionalization::impl::sync(out); |
21394 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21395 | } else { |
21396 | out_ = out; |
21397 | } |
21398 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
21399 | if ((false || at::functionalization::impl::isFunctionalTensor(n))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
21404 | } else { |
21405 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
21406 | at::AutoDispatchSkipFunctionalize guard; |
21407 | at::Tensor tmp_output = at::_ops::special_hermite_polynomial_he_x_scalar_out::call(x, n_, out_); |
      return out;
21409 | } |
21410 | } else { |
21411 | at::Tensor tmp_output; |
21412 | { |
21413 | at::AutoDispatchSkipFunctionalize guard; |
21414 | tmp_output = at::_ops::special_hermite_polynomial_he_x_scalar::call(x, n_); |
21415 | } |
21416 | at::functionalization::impl::replace_(out, tmp_output); |
21417 | at::functionalization::impl::commit_update(out); |
21418 | at::functionalization::impl::sync(out); |
21419 | return out; |
21420 | } |
21421 | } |
21422 | |
21423 | at::Tensor & special_hermite_polynomial_he_out_n_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { |
21424 | if (false) { |
21425 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21426 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
21428 | auto x_meta = to_meta(x); |
21429 | auto out_meta = to_meta(out); |
21430 | at::AutoDispatchSkipFunctionalize func_guard; |
21431 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21432 | at::_ops::special_hermite_polynomial_he_n_scalar_out::call(x_meta, n, out_meta); |
21433 | } |
21434 | |
21435 | at::Tensor x_; |
21436 | if (at::functionalization::impl::isFunctionalTensor(x)) { |
21437 | at::functionalization::impl::sync(x); |
21438 | x_ = at::functionalization::impl::from_functional_tensor(x); |
21439 | } else { |
21440 | x_ = x; |
21441 | } |
21442 | |
21443 | at::Tensor out_; |
21444 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21445 | at::functionalization::impl::sync(out); |
21446 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21447 | } else { |
21448 | out_ = out; |
21449 | } |
21450 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
21451 | if ((false || at::functionalization::impl::isFunctionalTensor(x))) { |
21452 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
21453 | TORCH_INTERNAL_ASSERT(false, |
21454 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
21455 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
21456 | } else { |
21457 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
21458 | at::AutoDispatchSkipFunctionalize guard; |
21459 | at::Tensor tmp_output = at::_ops::special_hermite_polynomial_he_n_scalar_out::call(x_, n, out_); |
      return out;
21461 | } |
21462 | } else { |
21463 | at::Tensor tmp_output; |
21464 | { |
21465 | at::AutoDispatchSkipFunctionalize guard; |
21466 | tmp_output = at::_ops::special_hermite_polynomial_he_n_scalar::call(x_, n); |
21467 | } |
21468 | at::functionalization::impl::replace_(out, tmp_output); |
21469 | at::functionalization::impl::commit_update(out); |
21470 | at::functionalization::impl::sync(out); |
21471 | return out; |
21472 | } |
21473 | } |
21474 | |
21475 | at::Tensor & special_laguerre_polynomial_l_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) { |
21476 | if (false) { |
21477 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21478 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
21480 | auto x_meta = to_meta(x); |
21481 | auto n_meta = to_meta(n); |
21482 | auto out_meta = to_meta(out); |
21483 | at::AutoDispatchSkipFunctionalize func_guard; |
21484 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21485 | at::_ops::special_laguerre_polynomial_l_out::call(x_meta, n_meta, out_meta); |
21486 | } |
21487 | |
21488 | at::Tensor x_; |
21489 | if (at::functionalization::impl::isFunctionalTensor(x)) { |
21490 | at::functionalization::impl::sync(x); |
21491 | x_ = at::functionalization::impl::from_functional_tensor(x); |
21492 | } else { |
21493 | x_ = x; |
21494 | } |
21495 | |
21496 | at::Tensor n_; |
21497 | if (at::functionalization::impl::isFunctionalTensor(n)) { |
21498 | at::functionalization::impl::sync(n); |
21499 | n_ = at::functionalization::impl::from_functional_tensor(n); |
21500 | } else { |
21501 | n_ = n; |
21502 | } |
21503 | |
21504 | at::Tensor out_; |
21505 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21506 | at::functionalization::impl::sync(out); |
21507 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21508 | } else { |
21509 | out_ = out; |
21510 | } |
21511 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
21512 | if ((false || at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(n))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
21517 | } else { |
21518 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
21519 | at::AutoDispatchSkipFunctionalize guard; |
21520 | at::Tensor tmp_output = at::_ops::special_laguerre_polynomial_l_out::call(x_, n_, out_); |
      return out;
21522 | } |
21523 | } else { |
21524 | at::Tensor tmp_output; |
21525 | { |
21526 | at::AutoDispatchSkipFunctionalize guard; |
21527 | tmp_output = at::_ops::special_laguerre_polynomial_l::call(x_, n_); |
21528 | } |
21529 | at::functionalization::impl::replace_(out, tmp_output); |
21530 | at::functionalization::impl::commit_update(out); |
21531 | at::functionalization::impl::sync(out); |
21532 | return out; |
21533 | } |
21534 | } |
21535 | |
21536 | at::Tensor & special_laguerre_polynomial_l_out_x_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { |
21537 | if (false) { |
21538 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21539 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
21541 | auto n_meta = to_meta(n); |
21542 | auto out_meta = to_meta(out); |
21543 | at::AutoDispatchSkipFunctionalize func_guard; |
21544 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21545 | at::_ops::special_laguerre_polynomial_l_x_scalar_out::call(x, n_meta, out_meta); |
21546 | } |
21547 | |
21548 | at::Tensor n_; |
21549 | if (at::functionalization::impl::isFunctionalTensor(n)) { |
21550 | at::functionalization::impl::sync(n); |
21551 | n_ = at::functionalization::impl::from_functional_tensor(n); |
21552 | } else { |
21553 | n_ = n; |
21554 | } |
21555 | |
21556 | at::Tensor out_; |
21557 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21558 | at::functionalization::impl::sync(out); |
21559 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21560 | } else { |
21561 | out_ = out; |
21562 | } |
21563 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
21564 | if ((false || at::functionalization::impl::isFunctionalTensor(n))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
21569 | } else { |
21570 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
21571 | at::AutoDispatchSkipFunctionalize guard; |
21572 | at::Tensor tmp_output = at::_ops::special_laguerre_polynomial_l_x_scalar_out::call(x, n_, out_); |
      return out;
21574 | } |
21575 | } else { |
21576 | at::Tensor tmp_output; |
21577 | { |
21578 | at::AutoDispatchSkipFunctionalize guard; |
21579 | tmp_output = at::_ops::special_laguerre_polynomial_l_x_scalar::call(x, n_); |
21580 | } |
21581 | at::functionalization::impl::replace_(out, tmp_output); |
21582 | at::functionalization::impl::commit_update(out); |
21583 | at::functionalization::impl::sync(out); |
21584 | return out; |
21585 | } |
21586 | } |
21587 | |
21588 | at::Tensor & special_laguerre_polynomial_l_out_n_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { |
21589 | if (false) { |
21590 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21591 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
21593 | auto x_meta = to_meta(x); |
21594 | auto out_meta = to_meta(out); |
21595 | at::AutoDispatchSkipFunctionalize func_guard; |
21596 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21597 | at::_ops::special_laguerre_polynomial_l_n_scalar_out::call(x_meta, n, out_meta); |
21598 | } |
21599 | |
21600 | at::Tensor x_; |
21601 | if (at::functionalization::impl::isFunctionalTensor(x)) { |
21602 | at::functionalization::impl::sync(x); |
21603 | x_ = at::functionalization::impl::from_functional_tensor(x); |
21604 | } else { |
21605 | x_ = x; |
21606 | } |
21607 | |
21608 | at::Tensor out_; |
21609 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21610 | at::functionalization::impl::sync(out); |
21611 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21612 | } else { |
21613 | out_ = out; |
21614 | } |
21615 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
21616 | if ((false || at::functionalization::impl::isFunctionalTensor(x))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
21621 | } else { |
21622 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
21623 | at::AutoDispatchSkipFunctionalize guard; |
21624 | at::Tensor tmp_output = at::_ops::special_laguerre_polynomial_l_n_scalar_out::call(x_, n, out_); |
      return out;
21626 | } |
21627 | } else { |
21628 | at::Tensor tmp_output; |
21629 | { |
21630 | at::AutoDispatchSkipFunctionalize guard; |
21631 | tmp_output = at::_ops::special_laguerre_polynomial_l_n_scalar::call(x_, n); |
21632 | } |
21633 | at::functionalization::impl::replace_(out, tmp_output); |
21634 | at::functionalization::impl::commit_update(out); |
21635 | at::functionalization::impl::sync(out); |
21636 | return out; |
21637 | } |
21638 | } |
21639 | |
21640 | at::Tensor & special_shifted_chebyshev_polynomial_v_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) { |
21641 | if (false) { |
21642 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21643 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
21645 | auto x_meta = to_meta(x); |
21646 | auto n_meta = to_meta(n); |
21647 | auto out_meta = to_meta(out); |
21648 | at::AutoDispatchSkipFunctionalize func_guard; |
21649 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21650 | at::_ops::special_shifted_chebyshev_polynomial_v_out::call(x_meta, n_meta, out_meta); |
21651 | } |
21652 | |
21653 | at::Tensor x_; |
21654 | if (at::functionalization::impl::isFunctionalTensor(x)) { |
21655 | at::functionalization::impl::sync(x); |
21656 | x_ = at::functionalization::impl::from_functional_tensor(x); |
21657 | } else { |
21658 | x_ = x; |
21659 | } |
21660 | |
21661 | at::Tensor n_; |
21662 | if (at::functionalization::impl::isFunctionalTensor(n)) { |
21663 | at::functionalization::impl::sync(n); |
21664 | n_ = at::functionalization::impl::from_functional_tensor(n); |
21665 | } else { |
21666 | n_ = n; |
21667 | } |
21668 | |
21669 | at::Tensor out_; |
21670 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21671 | at::functionalization::impl::sync(out); |
21672 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21673 | } else { |
21674 | out_ = out; |
21675 | } |
21676 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
21677 | if ((false || at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(n))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
21682 | } else { |
21683 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
21684 | at::AutoDispatchSkipFunctionalize guard; |
21685 | at::Tensor tmp_output = at::_ops::special_shifted_chebyshev_polynomial_v_out::call(x_, n_, out_); |
      return out;
21687 | } |
21688 | } else { |
21689 | at::Tensor tmp_output; |
21690 | { |
21691 | at::AutoDispatchSkipFunctionalize guard; |
21692 | tmp_output = at::_ops::special_shifted_chebyshev_polynomial_v::call(x_, n_); |
21693 | } |
21694 | at::functionalization::impl::replace_(out, tmp_output); |
21695 | at::functionalization::impl::commit_update(out); |
21696 | at::functionalization::impl::sync(out); |
21697 | return out; |
21698 | } |
21699 | } |
21700 | |
21701 | at::Tensor & special_shifted_chebyshev_polynomial_v_out_x_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { |
21702 | if (false) { |
21703 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21704 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
21706 | auto n_meta = to_meta(n); |
21707 | auto out_meta = to_meta(out); |
21708 | at::AutoDispatchSkipFunctionalize func_guard; |
21709 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21710 | at::_ops::special_shifted_chebyshev_polynomial_v_x_scalar_out::call(x, n_meta, out_meta); |
21711 | } |
21712 | |
21713 | at::Tensor n_; |
21714 | if (at::functionalization::impl::isFunctionalTensor(n)) { |
21715 | at::functionalization::impl::sync(n); |
21716 | n_ = at::functionalization::impl::from_functional_tensor(n); |
21717 | } else { |
21718 | n_ = n; |
21719 | } |
21720 | |
21721 | at::Tensor out_; |
21722 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21723 | at::functionalization::impl::sync(out); |
21724 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21725 | } else { |
21726 | out_ = out; |
21727 | } |
21728 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
21729 | if ((false || at::functionalization::impl::isFunctionalTensor(n))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
21734 | } else { |
21735 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
21736 | at::AutoDispatchSkipFunctionalize guard; |
21737 | at::Tensor tmp_output = at::_ops::special_shifted_chebyshev_polynomial_v_x_scalar_out::call(x, n_, out_); |
      return out;
21739 | } |
21740 | } else { |
21741 | at::Tensor tmp_output; |
21742 | { |
21743 | at::AutoDispatchSkipFunctionalize guard; |
21744 | tmp_output = at::_ops::special_shifted_chebyshev_polynomial_v_x_scalar::call(x, n_); |
21745 | } |
21746 | at::functionalization::impl::replace_(out, tmp_output); |
21747 | at::functionalization::impl::commit_update(out); |
21748 | at::functionalization::impl::sync(out); |
21749 | return out; |
21750 | } |
21751 | } |
21752 | |
21753 | at::Tensor & special_shifted_chebyshev_polynomial_v_out_n_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { |
21754 | if (false) { |
21755 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21756 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
21758 | auto x_meta = to_meta(x); |
21759 | auto out_meta = to_meta(out); |
21760 | at::AutoDispatchSkipFunctionalize func_guard; |
21761 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21762 | at::_ops::special_shifted_chebyshev_polynomial_v_n_scalar_out::call(x_meta, n, out_meta); |
21763 | } |
21764 | |
21765 | at::Tensor x_; |
21766 | if (at::functionalization::impl::isFunctionalTensor(x)) { |
21767 | at::functionalization::impl::sync(x); |
21768 | x_ = at::functionalization::impl::from_functional_tensor(x); |
21769 | } else { |
21770 | x_ = x; |
21771 | } |
21772 | |
21773 | at::Tensor out_; |
21774 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21775 | at::functionalization::impl::sync(out); |
21776 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21777 | } else { |
21778 | out_ = out; |
21779 | } |
21780 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
21781 | if ((false || at::functionalization::impl::isFunctionalTensor(x))) { |
21782 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
21783 | TORCH_INTERNAL_ASSERT(false, |
21784 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
21785 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
21786 | } else { |
21787 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
21788 | at::AutoDispatchSkipFunctionalize guard; |
21789 | at::Tensor tmp_output = at::_ops::special_shifted_chebyshev_polynomial_v_n_scalar_out::call(x_, n, out_); |
      return out;
21791 | } |
21792 | } else { |
21793 | at::Tensor tmp_output; |
21794 | { |
21795 | at::AutoDispatchSkipFunctionalize guard; |
21796 | tmp_output = at::_ops::special_shifted_chebyshev_polynomial_v_n_scalar::call(x_, n); |
21797 | } |
21798 | at::functionalization::impl::replace_(out, tmp_output); |
21799 | at::functionalization::impl::commit_update(out); |
21800 | at::functionalization::impl::sync(out); |
21801 | return out; |
21802 | } |
21803 | } |
21804 | |
21805 | void _fused_adamw_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out) { |
21806 | if (false) { |
21807 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21808 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
21810 | auto self_meta = to_meta(self); |
21811 | auto grads_meta = to_meta(grads); |
21812 | auto exp_avgs_meta = to_meta(exp_avgs); |
21813 | auto exp_avg_sqs_meta = to_meta(exp_avg_sqs); |
21814 | auto max_exp_avg_sqs_meta = to_meta(max_exp_avg_sqs); |
21815 | auto state_steps_meta = to_meta(state_steps); |
21816 | auto grad_scale_meta = to_meta(grad_scale); |
21817 | auto found_inf_meta = to_meta(found_inf); |
21818 | auto out_meta = to_meta(out); |
21819 | at::AutoDispatchSkipFunctionalize func_guard; |
21820 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21821 | at::_ops::_fused_adamw_out::call(self_meta, grads_meta, exp_avgs_meta, exp_avg_sqs_meta, max_exp_avg_sqs_meta, state_steps_meta, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_meta, found_inf_meta, out_meta); |
21822 | } |
21823 | |
21824 | ::std::vector<at::Tensor> self_; |
21825 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
21826 | at::functionalization::impl::sync(self); |
21827 | self_ = at::functionalization::impl::from_functional_tensor(self); |
21828 | } else { |
21829 | self_ = self.vec(); |
21830 | } |
21831 | |
21832 | ::std::vector<at::Tensor> grads_; |
21833 | if (at::functionalization::impl::isFunctionalTensor(grads)) { |
21834 | at::functionalization::impl::sync(grads); |
21835 | grads_ = at::functionalization::impl::from_functional_tensor(grads); |
21836 | } else { |
21837 | grads_ = grads.vec(); |
21838 | } |
21839 | |
21840 | ::std::vector<at::Tensor> exp_avgs_; |
21841 | if (at::functionalization::impl::isFunctionalTensor(exp_avgs)) { |
21842 | at::functionalization::impl::sync(exp_avgs); |
21843 | exp_avgs_ = at::functionalization::impl::from_functional_tensor(exp_avgs); |
21844 | } else { |
21845 | exp_avgs_ = exp_avgs.vec(); |
21846 | } |
21847 | |
21848 | ::std::vector<at::Tensor> exp_avg_sqs_; |
21849 | if (at::functionalization::impl::isFunctionalTensor(exp_avg_sqs)) { |
21850 | at::functionalization::impl::sync(exp_avg_sqs); |
21851 | exp_avg_sqs_ = at::functionalization::impl::from_functional_tensor(exp_avg_sqs); |
21852 | } else { |
21853 | exp_avg_sqs_ = exp_avg_sqs.vec(); |
21854 | } |
21855 | |
21856 | ::std::vector<at::Tensor> max_exp_avg_sqs_; |
21857 | if (at::functionalization::impl::isFunctionalTensor(max_exp_avg_sqs)) { |
21858 | at::functionalization::impl::sync(max_exp_avg_sqs); |
21859 | max_exp_avg_sqs_ = at::functionalization::impl::from_functional_tensor(max_exp_avg_sqs); |
21860 | } else { |
21861 | max_exp_avg_sqs_ = max_exp_avg_sqs.vec(); |
21862 | } |
21863 | |
21864 | ::std::vector<at::Tensor> state_steps_; |
21865 | if (at::functionalization::impl::isFunctionalTensor(state_steps)) { |
21866 | at::functionalization::impl::sync(state_steps); |
21867 | state_steps_ = at::functionalization::impl::from_functional_tensor(state_steps); |
21868 | } else { |
21869 | state_steps_ = state_steps.vec(); |
21870 | } |
21871 | |
21872 | c10::optional<at::Tensor> grad_scale_; |
21873 | if (at::functionalization::impl::isFunctionalTensor(grad_scale)) { |
21874 | at::functionalization::impl::sync(grad_scale); |
21875 | grad_scale_ = at::functionalization::impl::from_functional_tensor(grad_scale); |
21876 | } else { |
21877 | grad_scale_ = grad_scale; |
21878 | } |
21879 | |
21880 | c10::optional<at::Tensor> found_inf_; |
21881 | if (at::functionalization::impl::isFunctionalTensor(found_inf)) { |
21882 | at::functionalization::impl::sync(found_inf); |
21883 | found_inf_ = at::functionalization::impl::from_functional_tensor(found_inf); |
21884 | } else { |
21885 | found_inf_ = found_inf; |
21886 | } |
21887 | |
21888 | ::std::vector<at::Tensor> out_; |
21889 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21890 | at::functionalization::impl::sync(out); |
21891 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21892 | } else { |
21893 | out_ = out.vec(); |
21894 | } |
21895 | if (!(true && at::functionalization::impl::isFunctionalTensor(grads) && at::functionalization::impl::isFunctionalTensor(exp_avgs) && at::functionalization::impl::isFunctionalTensor(exp_avg_sqs) && at::functionalization::impl::isFunctionalTensor(max_exp_avg_sqs) && at::functionalization::impl::isFunctionalTensor(out))) { |
21896 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(state_steps) || at::functionalization::impl::isFunctionalTensor(grad_scale) || at::functionalization::impl::isFunctionalTensor(found_inf))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
21901 | } else { |
21902 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
21903 | at::AutoDispatchSkipFunctionalize guard; |
21904 | at::_ops::_fused_adamw_out::call(self_, grads_, exp_avgs_, exp_avg_sqs_, max_exp_avg_sqs_, state_steps_, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_, found_inf_, out_); |
21905 | ; |
21906 | } |
21907 | } else { |
21908 | ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> tmp_output; |
21909 | { |
21910 | at::AutoDispatchSkipFunctionalize guard; |
21911 | tmp_output = at::_ops::_fused_adamw::call(self_, grads_, exp_avgs_, exp_avg_sqs_, max_exp_avg_sqs_, state_steps_, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_, found_inf_); |
21912 | } |
21913 | at::functionalization::impl::replace_(grads, std::get<0>(tmp_output)); |
21914 | at::functionalization::impl::commit_update(grads); |
21915 | at::functionalization::impl::sync(grads); |
21916 | at::functionalization::impl::replace_(exp_avgs, std::get<1>(tmp_output)); |
21917 | at::functionalization::impl::commit_update(exp_avgs); |
21918 | at::functionalization::impl::sync(exp_avgs); |
21919 | at::functionalization::impl::replace_(exp_avg_sqs, std::get<2>(tmp_output)); |
21920 | at::functionalization::impl::commit_update(exp_avg_sqs); |
21921 | at::functionalization::impl::sync(exp_avg_sqs); |
21922 | at::functionalization::impl::replace_(max_exp_avg_sqs, std::get<3>(tmp_output)); |
21923 | at::functionalization::impl::commit_update(max_exp_avg_sqs); |
21924 | at::functionalization::impl::sync(max_exp_avg_sqs); |
21925 | at::functionalization::impl::replace_(out, std::get<4>(tmp_output)); |
21926 | at::functionalization::impl::commit_update(out); |
21927 | at::functionalization::impl::sync(out); |
21928 | |
21929 | } |
21930 | } |
21931 | |
21932 | void _fused_adamw_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) { |
21933 | if (true) { |
21934 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21935 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
21937 | auto self_meta = to_meta(self); |
21938 | auto grads_meta = to_meta(grads); |
21939 | auto exp_avgs_meta = to_meta(exp_avgs); |
21940 | auto exp_avg_sqs_meta = to_meta(exp_avg_sqs); |
21941 | auto max_exp_avg_sqs_meta = to_meta(max_exp_avg_sqs); |
21942 | auto state_steps_meta = to_meta(state_steps); |
21943 | auto grad_scale_meta = to_meta(grad_scale); |
21944 | auto found_inf_meta = to_meta(found_inf); |
21945 | at::AutoDispatchSkipFunctionalize func_guard; |
21946 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21947 | at::_ops::_fused_adamw_::call(self_meta, grads_meta, exp_avgs_meta, exp_avg_sqs_meta, max_exp_avg_sqs_meta, state_steps_meta, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_meta, found_inf_meta); |
21948 | } |
21949 | |
21950 | ::std::vector<at::Tensor> self_; |
21951 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
21952 | at::functionalization::impl::sync(self); |
21953 | self_ = at::functionalization::impl::from_functional_tensor(self); |
21954 | } else { |
21955 | self_ = self.vec(); |
21956 | } |
21957 | |
21958 | ::std::vector<at::Tensor> grads_; |
21959 | if (at::functionalization::impl::isFunctionalTensor(grads)) { |
21960 | at::functionalization::impl::sync(grads); |
21961 | grads_ = at::functionalization::impl::from_functional_tensor(grads); |
21962 | } else { |
21963 | grads_ = grads.vec(); |
21964 | } |
21965 | |
21966 | ::std::vector<at::Tensor> exp_avgs_; |
21967 | if (at::functionalization::impl::isFunctionalTensor(exp_avgs)) { |
21968 | at::functionalization::impl::sync(exp_avgs); |
21969 | exp_avgs_ = at::functionalization::impl::from_functional_tensor(exp_avgs); |
21970 | } else { |
21971 | exp_avgs_ = exp_avgs.vec(); |
21972 | } |
21973 | |
21974 | ::std::vector<at::Tensor> exp_avg_sqs_; |
21975 | if (at::functionalization::impl::isFunctionalTensor(exp_avg_sqs)) { |
21976 | at::functionalization::impl::sync(exp_avg_sqs); |
21977 | exp_avg_sqs_ = at::functionalization::impl::from_functional_tensor(exp_avg_sqs); |
21978 | } else { |
21979 | exp_avg_sqs_ = exp_avg_sqs.vec(); |
21980 | } |
21981 | |
21982 | ::std::vector<at::Tensor> max_exp_avg_sqs_; |
21983 | if (at::functionalization::impl::isFunctionalTensor(max_exp_avg_sqs)) { |
21984 | at::functionalization::impl::sync(max_exp_avg_sqs); |
21985 | max_exp_avg_sqs_ = at::functionalization::impl::from_functional_tensor(max_exp_avg_sqs); |
21986 | } else { |
21987 | max_exp_avg_sqs_ = max_exp_avg_sqs.vec(); |
21988 | } |
21989 | |
21990 | ::std::vector<at::Tensor> state_steps_; |
21991 | if (at::functionalization::impl::isFunctionalTensor(state_steps)) { |
21992 | at::functionalization::impl::sync(state_steps); |
21993 | state_steps_ = at::functionalization::impl::from_functional_tensor(state_steps); |
21994 | } else { |
21995 | state_steps_ = state_steps.vec(); |
21996 | } |
21997 | |
21998 | c10::optional<at::Tensor> grad_scale_; |
21999 | if (at::functionalization::impl::isFunctionalTensor(grad_scale)) { |
22000 | at::functionalization::impl::sync(grad_scale); |
22001 | grad_scale_ = at::functionalization::impl::from_functional_tensor(grad_scale); |
22002 | } else { |
22003 | grad_scale_ = grad_scale; |
22004 | } |
22005 | |
22006 | c10::optional<at::Tensor> found_inf_; |
22007 | if (at::functionalization::impl::isFunctionalTensor(found_inf)) { |
22008 | at::functionalization::impl::sync(found_inf); |
22009 | found_inf_ = at::functionalization::impl::from_functional_tensor(found_inf); |
22010 | } else { |
22011 | found_inf_ = found_inf; |
22012 | } |
22013 | if (!(true && at::functionalization::impl::isFunctionalTensor(self) && at::functionalization::impl::isFunctionalTensor(grads) && at::functionalization::impl::isFunctionalTensor(exp_avgs) && at::functionalization::impl::isFunctionalTensor(exp_avg_sqs) && at::functionalization::impl::isFunctionalTensor(max_exp_avg_sqs))) { |
22014 | if ((false || at::functionalization::impl::isFunctionalTensor(state_steps) || at::functionalization::impl::isFunctionalTensor(grad_scale) || at::functionalization::impl::isFunctionalTensor(found_inf))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
22019 | } else { |
22020 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
22021 | at::AutoDispatchSkipFunctionalize guard; |
22022 | at::_ops::_fused_adamw_::call(self_, grads_, exp_avgs_, exp_avg_sqs_, max_exp_avg_sqs_, state_steps_, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_, found_inf_); |
22023 | ; |
22024 | } |
22025 | } else { |
22026 | ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> tmp_output; |
22027 | { |
22028 | at::AutoDispatchSkipFunctionalize guard; |
22029 | tmp_output = at::_ops::_fused_adamw::call(self_, grads_, exp_avgs_, exp_avg_sqs_, max_exp_avg_sqs_, state_steps_, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_, found_inf_); |
22030 | } |
22031 | at::functionalization::impl::replace_(self, std::get<0>(tmp_output)); |
22032 | at::functionalization::impl::commit_update(self); |
22033 | at::functionalization::impl::sync(self); |
22034 | at::functionalization::impl::replace_(grads, std::get<1>(tmp_output)); |
22035 | at::functionalization::impl::commit_update(grads); |
22036 | at::functionalization::impl::sync(grads); |
22037 | at::functionalization::impl::replace_(exp_avgs, std::get<2>(tmp_output)); |
22038 | at::functionalization::impl::commit_update(exp_avgs); |
22039 | at::functionalization::impl::sync(exp_avgs); |
22040 | at::functionalization::impl::replace_(exp_avg_sqs, std::get<3>(tmp_output)); |
22041 | at::functionalization::impl::commit_update(exp_avg_sqs); |
22042 | at::functionalization::impl::sync(exp_avg_sqs); |
22043 | at::functionalization::impl::replace_(max_exp_avg_sqs, std::get<4>(tmp_output)); |
22044 | at::functionalization::impl::commit_update(max_exp_avg_sqs); |
22045 | at::functionalization::impl::sync(max_exp_avg_sqs); |
22046 | |
22047 | } |
22048 | } |
22049 | |
22050 | at::Tensor _fw_primal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t level) { |
22051 | |
22052 | at::Tensor self_; |
22053 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
22054 | |
22055 | self_ = at::functionalization::impl::from_functional_tensor(self); |
22056 | } else { |
22057 | self_ = self; |
22058 | } |
22059 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
22060 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
22061 | at::AutoDispatchSkipFunctionalize guard; |
22062 | return at::_ops::_fw_primal::call(self_, level); |
22063 | } |
22064 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
22065 | auto compute_reference_meta = |
22066 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
22067 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
22068 | at::Tensor reference_tensor_output; |
22069 | if (compute_reference_meta) { |
22070 | auto self_meta = to_meta(self); |
22071 | at::AutoDispatchSkipFunctionalize func_guard; |
22072 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22073 | reference_tensor_output = at::_ops::_fw_primal::call(self_meta, level); |
22074 | } |
22075 | at::Tensor tmp_output; |
22076 | { |
22077 | at::AutoDispatchSkipFunctionalize guard; |
22078 | if (reapply_views) { |
22079 | tmp_output = at::_ops::_fw_primal::call(self_, level); |
22080 | } else { |
22081 | tmp_output = at::_ops::_fw_primal_copy::call(self_, level); |
22082 | } |
22083 | } |
22084 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
22085 | [reapply_views = reapply_views, level = level](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
22086 | if (reapply_views) { |
22087 | return at::_ops::_fw_primal::call(base, level); |
22088 | } else { |
22089 | return at::_ops::_fw_primal_copy::call(base, level); |
22090 | } |
22091 | }, |
22092 | [reapply_views = reapply_views, level = level](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
22093 | return at::functionalization::FunctionalInverses::_fw_primal_copy_inverse(base, mutated_view, reapply_views, level); |
22094 | } |
22095 | ); |
22096 | auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); |
22097 | // See Note [Propagating strides in the functionalization pass] |
22098 | if (compute_reference_meta) { |
22099 | at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); |
22100 | } |
22101 | return out; |
22102 | } |
22103 | |
22104 | at::Tensor _make_dual(c10::DispatchKeySet dispatchKeySet, const at::Tensor & primal, const at::Tensor & tangent, int64_t level) { |
22105 | |
22106 | at::Tensor primal_; |
22107 | if (at::functionalization::impl::isFunctionalTensor(primal)) { |
22108 | |
22109 | primal_ = at::functionalization::impl::from_functional_tensor(primal); |
22110 | } else { |
22111 | primal_ = primal; |
22112 | } |
22113 | |
22114 | at::Tensor tangent_; |
22115 | if (at::functionalization::impl::isFunctionalTensor(tangent)) { |
22116 | |
22117 | tangent_ = at::functionalization::impl::from_functional_tensor(tangent); |
22118 | } else { |
22119 | tangent_ = tangent; |
22120 | } |
22121 | if (!at::functionalization::impl::isFunctionalTensor(primal)) { |
22122 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
22123 | at::AutoDispatchSkipFunctionalize guard; |
22124 | return at::_ops::_make_dual::call(primal_, tangent_, level); |
22125 | } |
22126 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
22127 | auto compute_reference_meta = |
22128 | primal.key_set().has_backend(c10::BackendComponent::XLABit) || |
22129 | primal.key_set().has_backend(c10::BackendComponent::LazyBit); |
22130 | at::Tensor reference_tensor_output; |
22131 | if (compute_reference_meta) { |
22132 | auto primal_meta = to_meta(primal); |
22133 | auto tangent_meta = to_meta(tangent); |
22134 | at::AutoDispatchSkipFunctionalize func_guard; |
22135 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22136 | reference_tensor_output = at::_ops::_make_dual::call(primal_meta, tangent_meta, level); |
22137 | } |
22138 | at::Tensor tmp_output; |
22139 | { |
22140 | at::AutoDispatchSkipFunctionalize guard; |
22141 | if (reapply_views) { |
22142 | tmp_output = at::_ops::_make_dual::call(primal_, tangent_, level); |
22143 | } else { |
22144 | tmp_output = at::_ops::_make_dual_copy::call(primal_, tangent_, level); |
22145 | } |
22146 | } |
22147 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
22148 | [reapply_views = reapply_views, tangent = tangent, level = level](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
22149 | if (reapply_views) { |
22150 | return at::_ops::_make_dual::call(base, tangent, level); |
22151 | } else { |
22152 | return at::_ops::_make_dual_copy::call(base, tangent, level); |
22153 | } |
22154 | }, |
22155 | [reapply_views = reapply_views, tangent = tangent, level = level](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
22156 | return at::functionalization::FunctionalInverses::_make_dual_copy_inverse(base, mutated_view, reapply_views, tangent, level); |
22157 | } |
22158 | ); |
22159 | auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, primal, view_meta); |
22160 | // See Note [Propagating strides in the functionalization pass] |
22161 | if (compute_reference_meta) { |
22162 | at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); |
22163 | } |
22164 | return out; |
22165 | } |
22166 | |
22167 | at::Tensor view_as_complex(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { |
22168 | |
22169 | at::Tensor self_; |
22170 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
22171 | |
22172 | self_ = at::functionalization::impl::from_functional_tensor(self); |
22173 | } else { |
22174 | self_ = self; |
22175 | } |
22176 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
22177 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
22178 | at::AutoDispatchSkipFunctionalize guard; |
22179 | return at::_ops::view_as_complex::call(self_); |
22180 | } |
22181 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
22182 | auto compute_reference_meta = |
22183 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
22184 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
22185 | at::Tensor reference_tensor_output; |
22186 | if (compute_reference_meta) { |
22187 | auto self_meta = to_meta(self); |
22188 | at::AutoDispatchSkipFunctionalize func_guard; |
22189 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22190 | reference_tensor_output = at::_ops::view_as_complex::call(self_meta); |
22191 | } |
22192 | at::Tensor tmp_output; |
22193 | { |
22194 | at::AutoDispatchSkipFunctionalize guard; |
22195 | if (reapply_views) { |
22196 | tmp_output = at::_ops::view_as_complex::call(self_); |
22197 | } else { |
22198 | tmp_output = at::_ops::view_as_complex_copy::call(self_); |
22199 | } |
22200 | } |
22201 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
22202 | [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
22203 | if (reapply_views) { |
22204 | return at::_ops::view_as_complex::call(base); |
22205 | } else { |
22206 | return at::_ops::view_as_complex_copy::call(base); |
22207 | } |
22208 | }, |
22209 | [reapply_views = reapply_views](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
22210 | return at::functionalization::FunctionalInverses::view_as_complex_copy_inverse(base, mutated_view, reapply_views); |
22211 | } |
22212 | ); |
22213 | auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); |
22214 | // See Note [Propagating strides in the functionalization pass] |
22215 | if (compute_reference_meta) { |
22216 | at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); |
22217 | } |
22218 | return out; |
22219 | } |
22220 | |
22221 | at::Tensor expand(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) { |
22222 | |
22223 | at::Tensor self_; |
22224 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
22225 | |
22226 | self_ = at::functionalization::impl::from_functional_tensor(self); |
22227 | } else { |
22228 | self_ = self; |
22229 | } |
22230 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
22231 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
22232 | at::AutoDispatchSkipFunctionalize guard; |
22233 | return at::_ops::expand::call(self_, size, implicit); |
22234 | } |
22235 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
22236 | auto compute_reference_meta = |
22237 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
22238 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
22239 | at::Tensor reference_tensor_output; |
22240 | if (compute_reference_meta) { |
22241 | auto self_meta = to_meta(self); |
22242 | at::AutoDispatchSkipFunctionalize func_guard; |
22243 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22244 | reference_tensor_output = at::_ops::expand::call(self_meta, size, implicit); |
22245 | } |
22246 | at::Tensor tmp_output; |
22247 | { |
22248 | at::AutoDispatchSkipFunctionalize guard; |
22249 | if (reapply_views) { |
22250 | tmp_output = at::_ops::expand::call(self_, size, implicit); |
22251 | } else { |
22252 | tmp_output = at::_ops::expand_copy::call(self_, size, implicit); |
22253 | } |
22254 | } |
22255 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
22256 | [reapply_views = reapply_views, size = size.vec(), implicit = implicit](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
22257 | if (reapply_views) { |
22258 | return at::_ops::expand::call(base, size, implicit); |
22259 | } else { |
22260 | return at::_ops::expand_copy::call(base, size, implicit); |
22261 | } |
22262 | }, |
22263 | [reapply_views = reapply_views, size = size.vec(), implicit = implicit](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
22264 | return at::functionalization::FunctionalInverses::expand_copy_inverse(base, mutated_view, reapply_views, size, implicit); |
22265 | } |
22266 | ); |
22267 | auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); |
22268 | // See Note [Propagating strides in the functionalization pass] |
22269 | if (compute_reference_meta) { |
22270 | at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); |
22271 | } |
22272 | return out; |
22273 | } |
22274 | |
22275 | at::Tensor transpose_int(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1) { |
22276 | |
22277 | at::Tensor self_; |
22278 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
22279 | |
22280 | self_ = at::functionalization::impl::from_functional_tensor(self); |
22281 | } else { |
22282 | self_ = self; |
22283 | } |
22284 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
22285 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
22286 | at::AutoDispatchSkipFunctionalize guard; |
22287 | return at::_ops::transpose_int::call(self_, dim0, dim1); |
22288 | } |
22289 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
22290 | auto compute_reference_meta = |
22291 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
22292 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
22293 | at::Tensor reference_tensor_output; |
22294 | if (compute_reference_meta) { |
22295 | auto self_meta = to_meta(self); |
22296 | at::AutoDispatchSkipFunctionalize func_guard; |
22297 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22298 | reference_tensor_output = at::_ops::transpose_int::call(self_meta, dim0, dim1); |
22299 | } |
22300 | at::Tensor tmp_output; |
22301 | { |
22302 | at::AutoDispatchSkipFunctionalize guard; |
22303 | if (reapply_views) { |
22304 | tmp_output = at::_ops::transpose_int::call(self_, dim0, dim1); |
22305 | } else { |
22306 | tmp_output = at::_ops::transpose_copy_int::call(self_, dim0, dim1); |
22307 | } |
22308 | } |
22309 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
22310 | [reapply_views = reapply_views, dim0 = dim0, dim1 = dim1](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
22311 | if (reapply_views) { |
22312 | return at::_ops::transpose_int::call(base, dim0, dim1); |
22313 | } else { |
22314 | return at::_ops::transpose_copy_int::call(base, dim0, dim1); |
22315 | } |
22316 | }, |
22317 | [reapply_views = reapply_views, dim0 = dim0, dim1 = dim1](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
22318 | return at::functionalization::FunctionalInverses::transpose_copy_int_inverse(base, mutated_view, reapply_views, dim0, dim1); |
22319 | } |
22320 | ); |
22321 | auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); |
22322 | // See Note [Propagating strides in the functionalization pass] |
22323 | if (compute_reference_meta) { |
22324 | at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); |
22325 | } |
22326 | return out; |
22327 | } |
22328 | |
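// In-place view ops (transpose_, unsqueeze_, ...) don't create a new wrapper.
// Instead they append the ViewMeta to `self` itself via mutate_view_meta, so
// `self` is re-derived from its base on replay. A sketch of the observable
// behavior (assuming to_functional_tensor from FunctionalTensorWrapper.h):
//
//   at::Tensor base = at::ones({2, 3});
//   at::Tensor f = at::functionalization::impl::to_functional_tensor(base);
//   f.transpose_(0, 1);  // no data movement; records a ViewMeta on `f`
//   // f now reports sizes {3, 2}, computed by replaying the view off base.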
22329 | at::Tensor & transpose_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim0, int64_t dim1) { |
22330 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
22331 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
22332 | |
    // We already know `self` is not a FunctionalTensorWrapper here, so the
    // usual unwrap is a no-op.
    at::Tensor self_ = self;
22340 | at::AutoDispatchSkipFunctionalize guard; |
22341 | return at::_ops::transpose_::call(self_, dim0, dim1); |
22342 | } |
22343 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
22344 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
22345 | [reapply_views = reapply_views, dim0 = dim0, dim1 = dim1](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
22346 | if (reapply_views) { |
22347 | return at::_ops::transpose_int::call(base, dim0, dim1); |
22348 | } else { |
22349 | return at::_ops::transpose_copy_int::call(base, dim0, dim1); |
22350 | } |
22351 | }, |
22352 | [reapply_views = reapply_views, dim0 = dim0, dim1 = dim1](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
22353 | return at::functionalization::FunctionalInverses::transpose_copy_int_inverse(base, mutated_view, reapply_views, dim0, dim1); |
22354 | } |
22355 | ); |
22356 | auto compute_reference_meta = |
22357 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
22358 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
22359 | at::Tensor reference_tensor_output; |
22360 | if (compute_reference_meta) { |
22361 | auto self_meta = to_meta(self); |
22362 | at::AutoDispatchSkipFunctionalize func_guard; |
22363 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22364 | reference_tensor_output = at::_ops::transpose_::call(self_meta, dim0, dim1); |
22365 | } |
      // This function adds the above view meta to the current tensor and replays it off the base,
      // mutating the size/stride info of the current FunctionalTensorWrapper.
      // Because of this, we need to make sure to run the reference shape function above
      // BEFORE doing this (otherwise we'd end up running the reference function with the wrong sizes/strides).
22370 | at::functionalization::impl::mutate_view_meta(self, view_meta); |
22371 | // See Note [Propagating strides in the functionalization pass] |
22372 | // XLA/LTC don't implement the logic to propagate strides correctly, so we need to rely |
22373 | // on a reference implementation here (instead of relying on the output from the forward lambda |
22374 | // having the correct stride info) |
22375 | if (compute_reference_meta) { |
22376 | at::functionalization::impl::set_sizes_strides_offset(self, reference_tensor_output); |
22377 | } |
22378 | return self; |
22379 | } |
22380 | |
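// Ops with several tensor arguments unwrap each one, but only `self` (the tensor
// being viewed) decides whether functionalization applies. The extra tensor
// arguments (nested_size, nested_strides) are captured by value in the ViewMeta
// lambdas so that replay doesn't depend on the caller's locals staying alive.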
22381 | at::Tensor _nested_view_from_buffer(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, at::IntArrayRef offsets) { |
22382 | |
22383 | at::Tensor self_; |
22384 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
22385 | |
22386 | self_ = at::functionalization::impl::from_functional_tensor(self); |
22387 | } else { |
22388 | self_ = self; |
22389 | } |
22390 | |
22391 | at::Tensor nested_size_; |
22392 | if (at::functionalization::impl::isFunctionalTensor(nested_size)) { |
22393 | |
22394 | nested_size_ = at::functionalization::impl::from_functional_tensor(nested_size); |
22395 | } else { |
22396 | nested_size_ = nested_size; |
22397 | } |
22398 | |
22399 | at::Tensor nested_strides_; |
22400 | if (at::functionalization::impl::isFunctionalTensor(nested_strides)) { |
22401 | |
22402 | nested_strides_ = at::functionalization::impl::from_functional_tensor(nested_strides); |
22403 | } else { |
22404 | nested_strides_ = nested_strides; |
22405 | } |
22406 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
22407 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
22408 | at::AutoDispatchSkipFunctionalize guard; |
22409 | return at::_ops::_nested_view_from_buffer::call(self_, nested_size_, nested_strides_, offsets); |
22410 | } |
22411 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
22412 | auto compute_reference_meta = |
22413 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
22414 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
22415 | at::Tensor reference_tensor_output; |
22416 | if (compute_reference_meta) { |
22417 | auto self_meta = to_meta(self); |
22418 | auto nested_size_meta = to_meta(nested_size); |
22419 | auto nested_strides_meta = to_meta(nested_strides); |
22420 | at::AutoDispatchSkipFunctionalize func_guard; |
22421 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22422 | reference_tensor_output = at::_ops::_nested_view_from_buffer::call(self_meta, nested_size_meta, nested_strides_meta, offsets); |
22423 | } |
22424 | at::Tensor tmp_output; |
22425 | { |
22426 | at::AutoDispatchSkipFunctionalize guard; |
22427 | if (reapply_views) { |
22428 | tmp_output = at::_ops::_nested_view_from_buffer::call(self_, nested_size_, nested_strides_, offsets); |
22429 | } else { |
22430 | tmp_output = at::_ops::_nested_view_from_buffer_copy::call(self_, nested_size_, nested_strides_, offsets); |
22431 | } |
22432 | } |
22433 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
22434 | [reapply_views = reapply_views, nested_size = nested_size, nested_strides = nested_strides, offsets = offsets.vec()](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
22435 | if (reapply_views) { |
22436 | return at::_ops::_nested_view_from_buffer::call(base, nested_size, nested_strides, offsets); |
22437 | } else { |
22438 | return at::_ops::_nested_view_from_buffer_copy::call(base, nested_size, nested_strides, offsets); |
22439 | } |
22440 | }, |
22441 | [reapply_views = reapply_views, nested_size = nested_size, nested_strides = nested_strides, offsets = offsets.vec()](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
22442 | return at::functionalization::FunctionalInverses::_nested_view_from_buffer_copy_inverse(base, mutated_view, reapply_views, nested_size, nested_strides, offsets); |
22443 | } |
22444 | ); |
22445 | auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); |
22446 | // See Note [Propagating strides in the functionalization pass] |
22447 | if (compute_reference_meta) { |
22448 | at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); |
22449 | } |
22450 | return out; |
22451 | } |
22452 | |
22453 | at::Tensor unsqueeze(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) { |
22454 | |
22455 | at::Tensor self_; |
22456 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
22457 | |
22458 | self_ = at::functionalization::impl::from_functional_tensor(self); |
22459 | } else { |
22460 | self_ = self; |
22461 | } |
22462 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
22463 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
22464 | at::AutoDispatchSkipFunctionalize guard; |
22465 | return at::_ops::unsqueeze::call(self_, dim); |
22466 | } |
22467 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
22468 | auto compute_reference_meta = |
22469 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
22470 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
22471 | at::Tensor reference_tensor_output; |
22472 | if (compute_reference_meta) { |
22473 | auto self_meta = to_meta(self); |
22474 | at::AutoDispatchSkipFunctionalize func_guard; |
22475 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22476 | reference_tensor_output = at::_ops::unsqueeze::call(self_meta, dim); |
22477 | } |
22478 | at::Tensor tmp_output; |
22479 | { |
22480 | at::AutoDispatchSkipFunctionalize guard; |
22481 | if (reapply_views) { |
22482 | tmp_output = at::_ops::unsqueeze::call(self_, dim); |
22483 | } else { |
22484 | tmp_output = at::_ops::unsqueeze_copy::call(self_, dim); |
22485 | } |
22486 | } |
22487 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
22488 | [reapply_views = reapply_views, dim = dim](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
22489 | if (reapply_views) { |
22490 | return at::_ops::unsqueeze::call(base, dim); |
22491 | } else { |
22492 | return at::_ops::unsqueeze_copy::call(base, dim); |
22493 | } |
22494 | }, |
22495 | [reapply_views = reapply_views, dim = dim](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
22496 | return at::functionalization::FunctionalInverses::unsqueeze_copy_inverse(base, mutated_view, reapply_views, dim); |
22497 | } |
22498 | ); |
22499 | auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); |
22500 | // See Note [Propagating strides in the functionalization pass] |
22501 | if (compute_reference_meta) { |
22502 | at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); |
22503 | } |
22504 | return out; |
22505 | } |
22506 | |
22507 | at::Tensor & unsqueeze_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim) { |
22508 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
22509 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
22510 | |
    // We already know `self` is not a FunctionalTensorWrapper here, so the
    // usual unwrap is a no-op.
    at::Tensor self_ = self;
22518 | at::AutoDispatchSkipFunctionalize guard; |
22519 | return at::_ops::unsqueeze_::call(self_, dim); |
22520 | } |
22521 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
22522 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
22523 | [reapply_views = reapply_views, dim = dim](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
22524 | if (reapply_views) { |
22525 | return at::_ops::unsqueeze::call(base, dim); |
22526 | } else { |
22527 | return at::_ops::unsqueeze_copy::call(base, dim); |
22528 | } |
22529 | }, |
22530 | [reapply_views = reapply_views, dim = dim](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
22531 | return at::functionalization::FunctionalInverses::unsqueeze_copy_inverse(base, mutated_view, reapply_views, dim); |
22532 | } |
22533 | ); |
22534 | auto compute_reference_meta = |
22535 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
22536 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
22537 | at::Tensor reference_tensor_output; |
22538 | if (compute_reference_meta) { |
22539 | auto self_meta = to_meta(self); |
22540 | at::AutoDispatchSkipFunctionalize func_guard; |
22541 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22542 | reference_tensor_output = at::_ops::unsqueeze_::call(self_meta, dim); |
22543 | } |
      // This function adds the above view meta to the current tensor and replays it off the base,
      // mutating the size/stride info of the current FunctionalTensorWrapper.
      // Because of this, we need to make sure to run the reference shape function above
      // BEFORE doing this (otherwise we'd end up running the reference function with the wrong sizes/strides).
22548 | at::functionalization::impl::mutate_view_meta(self, view_meta); |
22549 | // See Note [Propagating strides in the functionalization pass] |
22550 | // XLA/LTC don't implement the logic to propagate strides correctly, so we need to rely |
22551 | // on a reference implementation here (instead of relying on the output from the forward lambda |
22552 | // having the correct stride info) |
22553 | if (compute_reference_meta) { |
22554 | at::functionalization::impl::set_sizes_strides_offset(self, reference_tensor_output); |
22555 | } |
22556 | return self; |
22557 | } |
22558 | |
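// The remaining kernels cover accessor-style views: _values on sparse COO tensors
// and ccol_indices on sparse CSC tensors alias the underlying value/index storage,
// so they get the same ViewMeta treatment, with _values_copy / ccol_indices_copy
// as the non-aliasing fallbacks.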
22559 | at::Tensor _values(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { |
22560 | |
22561 | at::Tensor self_; |
22562 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
22563 | |
22564 | self_ = at::functionalization::impl::from_functional_tensor(self); |
22565 | } else { |
22566 | self_ = self; |
22567 | } |
22568 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
22569 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
22570 | at::AutoDispatchSkipFunctionalize guard; |
22571 | return at::_ops::_values::call(self_); |
22572 | } |
22573 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
22574 | auto compute_reference_meta = |
22575 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
22576 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
22577 | at::Tensor reference_tensor_output; |
22578 | if (compute_reference_meta) { |
22579 | auto self_meta = to_meta(self); |
22580 | at::AutoDispatchSkipFunctionalize func_guard; |
22581 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22582 | reference_tensor_output = at::_ops::_values::call(self_meta); |
22583 | } |
22584 | at::Tensor tmp_output; |
22585 | { |
22586 | at::AutoDispatchSkipFunctionalize guard; |
22587 | if (reapply_views) { |
22588 | tmp_output = at::_ops::_values::call(self_); |
22589 | } else { |
22590 | tmp_output = at::_ops::_values_copy::call(self_); |
22591 | } |
22592 | } |
22593 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
22594 | [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
22595 | if (reapply_views) { |
22596 | return at::_ops::_values::call(base); |
22597 | } else { |
22598 | return at::_ops::_values_copy::call(base); |
22599 | } |
22600 | }, |
22601 | [reapply_views = reapply_views](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
22602 | return at::functionalization::FunctionalInverses::_values_copy_inverse(base, mutated_view, reapply_views); |
22603 | } |
22604 | ); |
22605 | auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); |
22606 | // See Note [Propagating strides in the functionalization pass] |
22607 | if (compute_reference_meta) { |
22608 | at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); |
22609 | } |
22610 | return out; |
22611 | } |
22612 | |
22613 | at::Tensor ccol_indices(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { |
22614 | |
22615 | at::Tensor self_; |
22616 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
22617 | |
22618 | self_ = at::functionalization::impl::from_functional_tensor(self); |
22619 | } else { |
22620 | self_ = self; |
22621 | } |
22622 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
22623 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
22624 | at::AutoDispatchSkipFunctionalize guard; |
22625 | return at::_ops::ccol_indices::call(self_); |
22626 | } |
22627 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
22628 | auto compute_reference_meta = |
22629 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
22630 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
22631 | at::Tensor reference_tensor_output; |
22632 | if (compute_reference_meta) { |
22633 | auto self_meta = to_meta(self); |
22634 | at::AutoDispatchSkipFunctionalize func_guard; |
22635 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22636 | reference_tensor_output = at::_ops::ccol_indices::call(self_meta); |
22637 | } |
22638 | at::Tensor tmp_output; |
22639 | { |
22640 | at::AutoDispatchSkipFunctionalize guard; |
22641 | if (reapply_views) { |
22642 | tmp_output = at::_ops::ccol_indices::call(self_); |
22643 | } else { |
22644 | tmp_output = at::_ops::ccol_indices_copy::call(self_); |
22645 | } |
22646 | } |
22647 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
22648 | [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
22649 | if (reapply_views) { |
22650 | return at::_ops::ccol_indices::call(base); |
22651 | } else { |
22652 | return at::_ops::ccol_indices_copy::call(base); |
22653 | } |
22654 | }, |
22655 | [reapply_views = reapply_views](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
22656 | return at::functionalization::FunctionalInverses::ccol_indices_copy_inverse(base, mutated_view, reapply_views); |
22657 | } |
22658 | ); |
22659 | auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); |
22660 | // See Note [Propagating strides in the functionalization pass] |
22661 | if (compute_reference_meta) { |
22662 | at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); |
22663 | } |
22664 | return out; |
22665 | } |
22666 | |
22667 | at::Tensor view(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size) { |
22668 | |
22669 | at::Tensor self_; |
22670 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
22671 | |
22672 | self_ = at::functionalization::impl::from_functional_tensor(self); |
22673 | } else { |
22674 | self_ = self; |
22675 | } |
22676 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
22677 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
22678 | at::AutoDispatchSkipFunctionalize guard; |
22679 | return at::_ops::view::call(self_, size); |
22680 | } |
22681 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
22682 | auto compute_reference_meta = |
22683 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
22684 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
22685 | at::Tensor reference_tensor_output; |
22686 | if (compute_reference_meta) { |
22687 | auto self_meta = to_meta(self); |
22688 | at::AutoDispatchSkipFunctionalize func_guard; |
22689 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22690 | reference_tensor_output = at::_ops::view::call(self_meta, size); |
22691 | } |
22692 | at::Tensor tmp_output; |
22693 | { |
22694 | at::AutoDispatchSkipFunctionalize guard; |
22695 | if (reapply_views) { |
22696 | tmp_output = at::_ops::view::call(self_, size); |
22697 | } else { |
22698 | tmp_output = at::_ops::view_copy::call(self_, size); |
22699 | } |
22700 | } |
22701 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
22702 | [reapply_views = reapply_views, size = size.vec()](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
22703 | if (reapply_views) { |
22704 | return at::_ops::view::call(base, size); |
22705 | } else { |
22706 | return at::_ops::view_copy::call(base, size); |
22707 | } |
22708 | }, |
22709 | [reapply_views = reapply_views, size = size.vec()](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
22710 | return at::functionalization::FunctionalInverses::view_copy_inverse(base, mutated_view, reapply_views, size); |
22711 | } |
22712 | ); |
22713 | auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); |
22714 | // See Note [Propagating strides in the functionalization pass] |
22715 | if (compute_reference_meta) { |
22716 | at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); |
22717 | } |
22718 | return out; |
22719 | } |
22720 | |
22721 | at::Tensor view_dtype(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype) { |
22722 | |
22723 | at::Tensor self_; |
22724 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
22725 | |
22726 | self_ = at::functionalization::impl::from_functional_tensor(self); |
22727 | } else { |
22728 | self_ = self; |
22729 | } |
22730 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
22731 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
22732 | at::AutoDispatchSkipFunctionalize guard; |
22733 | return at::_ops::view_dtype::call(self_, dtype); |
22734 | } |
22735 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
22736 | auto compute_reference_meta = |
22737 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
22738 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
22739 | at::Tensor reference_tensor_output; |
22740 | if (compute_reference_meta) { |
22741 | auto self_meta = to_meta(self); |
22742 | at::AutoDispatchSkipFunctionalize func_guard; |
22743 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22744 | reference_tensor_output = at::_ops::view_dtype::call(self_meta, dtype); |
22745 | } |
22746 | at::Tensor tmp_output; |
22747 | { |
22748 | at::AutoDispatchSkipFunctionalize guard; |
22749 | if (reapply_views) { |
22750 | tmp_output = at::_ops::view_dtype::call(self_, dtype); |
22751 | } else { |
22752 | tmp_output = at::_ops::view_copy_dtype::call(self_, dtype); |
22753 | } |
22754 | } |
22755 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
22756 | [reapply_views = reapply_views, dtype = dtype](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
22757 | if (reapply_views) { |
22758 | return at::_ops::view_dtype::call(base, dtype); |
22759 | } else { |
22760 | return at::_ops::view_copy_dtype::call(base, dtype); |
22761 | } |
22762 | }, |
22763 | [reapply_views = reapply_views, dtype = dtype](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
22764 | return at::functionalization::FunctionalInverses::view_copy_dtype_inverse(base, mutated_view, reapply_views, dtype); |
22765 | } |
22766 | ); |
22767 | auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); |
22768 | // See Note [Propagating strides in the functionalization pass] |
22769 | if (compute_reference_meta) { |
22770 | at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); |
22771 | } |
22772 | return out; |
22773 | } |
22774 | |
22775 | } // namespace functionalization |
22776 | |
22777 | namespace { |
22778 | |
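// Two registration styles appear below:
//   - TORCH_FN(functionalization::*) registers the functionalized wrappers
//     generated above (out= overloads and in-place variants).
//   - static_cast<...>(at::native::*) registers composite ops (arctanh, clip,
//     kron, ...) directly against their at::native implementations, which
//     decompose into other ops; the cast only exists to pick one overload.
// A sketch of the overload-resolution trick the static_cast performs, mirroring
// the arctanh.out registration below:
//
//   // at::native exposes several functions named arctanh_out; casting to an
//   // exact function-pointer type selects a single overload:
//   using fn_t = at::Tensor & (*)(const at::Tensor &, at::Tensor &);
//   fn_t f = static_cast<fn_t>(at::native::arctanh_out);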
22779 | TORCH_LIBRARY_IMPL(aten, Functionalize, m) { |
22780 | m.impl("_masked_scale.out" , TORCH_FN(functionalization::_masked_scale_out_out)); |
22781 | m.impl("native_dropout.out" , TORCH_FN(functionalization::native_dropout_out_out)); |
22782 | m.impl("native_dropout_backward.out" , TORCH_FN(functionalization::native_dropout_backward_out_out)); |
22783 | m.impl("asinh.out" , TORCH_FN(functionalization::asinh_out_out)); |
22784 | m.impl("asinh_" , TORCH_FN(functionalization::asinh_)); |
22785 | m.impl("arctanh" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::arctanh)); |
22786 | m.impl("arctanh.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::Tensor & out)>(at::native::arctanh_out)); |
22787 | m.impl("arctanh_" , static_cast<at::Tensor & (*)(at::Tensor & self)>(at::native::arctanh_)); |
22788 | m.impl("baddbmm.out" , TORCH_FN(functionalization::baddbmm_out_out)); |
22789 | m.impl("baddbmm_" , TORCH_FN(functionalization::baddbmm_)); |
22790 | m.impl("quantized_batch_norm.out" , TORCH_FN(functionalization::quantized_batch_norm_out_out)); |
22791 | m.impl("bernoulli.out" , TORCH_FN(functionalization::bernoulli_out_out)); |
22792 | m.impl("bernoulli.Tensor_out" , TORCH_FN(functionalization::bernoulli_out_Tensor_out)); |
22793 | m.impl("bernoulli_.Tensor" , TORCH_FN(functionalization::bernoulli__Tensor)); |
22794 | m.impl("bernoulli.float_out" , TORCH_FN(functionalization::bernoulli_out_float_out)); |
22795 | m.impl("bernoulli_.float" , TORCH_FN(functionalization::bernoulli__float)); |
22796 | m.impl("bmm.out" , TORCH_FN(functionalization::bmm_out_out)); |
22797 | m.impl("clamp_max.out" , TORCH_FN(functionalization::clamp_max_out_out)); |
22798 | m.impl("clamp_max_" , TORCH_FN(functionalization::clamp_max_)); |
22799 | m.impl("clamp_max.Tensor_out" , TORCH_FN(functionalization::clamp_max_out_Tensor_out)); |
22800 | m.impl("clamp_max_.Tensor" , TORCH_FN(functionalization::clamp_max__Tensor)); |
22801 | m.impl("clamp_min.out" , TORCH_FN(functionalization::clamp_min_out_out)); |
22802 | m.impl("clamp_min_" , TORCH_FN(functionalization::clamp_min_)); |
22803 | m.impl("clamp_min.Tensor_out" , TORCH_FN(functionalization::clamp_min_out_Tensor_out)); |
22804 | m.impl("clamp_min_.Tensor" , TORCH_FN(functionalization::clamp_min__Tensor)); |
22805 | m.impl("clip" , static_cast<at::Tensor (*)(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max)>(at::native::clip)); |
22806 | m.impl("clip.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max, at::Tensor & out)>(at::native::clip_out)); |
22807 | m.impl("clip_" , static_cast<at::Tensor & (*)(at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max)>(at::native::clip_)); |
22808 | m.impl("clip.Tensor" , static_cast<at::Tensor (*)(const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max)>(at::native::clip)); |
22809 | m.impl("clip.Tensor_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max, at::Tensor & out)>(at::native::clip_out)); |
22810 | m.impl("clip_.Tensor" , static_cast<at::Tensor & (*)(at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max)>(at::native::clip_)); |
22811 | m.impl("complex.out" , TORCH_FN(functionalization::complex_out_out)); |
22812 | m.impl("constant_pad_nd.out" , TORCH_FN(functionalization::constant_pad_nd_out_out)); |
22813 | m.impl("conv_tbc.out" , TORCH_FN(functionalization::conv_tbc_out_out)); |
22814 | m.impl("_copy_from_and_resize.out" , TORCH_FN(functionalization::_copy_from_and_resize_out_out)); |
22815 | m.impl("cos.out" , TORCH_FN(functionalization::cos_out_out)); |
22816 | m.impl("cos_" , TORCH_FN(functionalization::cos_)); |
22817 | m.impl("count_nonzero.dim_IntList_out" , TORCH_FN(functionalization::count_nonzero_out_dim_IntList_out)); |
22818 | m.impl("count_nonzero.out" , TORCH_FN(functionalization::count_nonzero_out_out)); |
22819 | m.impl("cudnn_affine_grid_generator_backward.out" , TORCH_FN(functionalization::cudnn_affine_grid_generator_backward_out_out)); |
22820 | m.impl("cudnn_batch_norm.out" , TORCH_FN(functionalization::cudnn_batch_norm_out_out)); |
22821 | m.impl("mps_convolution_transpose_backward.out" , TORCH_FN(functionalization::mps_convolution_transpose_backward_out_out)); |
22822 | m.impl("cudnn_grid_sampler_backward.out" , TORCH_FN(functionalization::cudnn_grid_sampler_backward_out_out)); |
22823 | m.impl("cummin.out" , TORCH_FN(functionalization::cummin_out_out)); |
22824 | m.impl("cummin.dimname" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & self, at::Dimname dim)>(at::native::cummin)); |
22825 | m.impl("cummin.dimname_out" , static_cast<::std::tuple<at::Tensor &,at::Tensor &> (*)(const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices)>(at::native::cummin_out)); |
22826 | m.impl("cumsum.out" , TORCH_FN(functionalization::cumsum_out_out)); |
22827 | m.impl("cumsum_" , TORCH_FN(functionalization::cumsum_)); |
22828 | m.impl("cumsum.dimname" , static_cast<at::Tensor (*)(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype)>(at::native::cumsum)); |
22829 | m.impl("cumsum.dimname_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype, at::Tensor & out)>(at::native::cumsum_out)); |
22830 | m.impl("cumsum_.dimname" , static_cast<at::Tensor & (*)(at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype)>(at::native::cumsum_)); |
22831 | m.impl("_ctc_loss.out" , TORCH_FN(functionalization::_ctc_loss_out_out)); |
22832 | m.impl("_ctc_loss.Tensor_out" , TORCH_FN(functionalization::_ctc_loss_out_Tensor_out)); |
22833 | m.impl("_ctc_loss_backward.out" , TORCH_FN(functionalization::_ctc_loss_backward_out_out)); |
22834 | m.impl("embedding.out" , TORCH_FN(functionalization::embedding_out_out)); |
22835 | m.impl("embedding_dense_backward.out" , TORCH_FN(functionalization::embedding_dense_backward_out_out)); |
22836 | m.impl("new_zeros.out" , TORCH_FN(functionalization::new_zeros_out_out)); |
22837 | m.impl("new_ones.out" , TORCH_FN(functionalization::new_ones_out_out)); |
22838 | m.impl("_empty_per_channel_affine_quantized.out" , TORCH_FN(functionalization::_empty_per_channel_affine_quantized_out_out)); |
22839 | m.impl("exp2.out" , TORCH_FN(functionalization::exp2_out_out)); |
22840 | m.impl("exp2_" , TORCH_FN(functionalization::exp2_)); |
22841 | m.impl("_grid_sampler_2d_cpu_fallback.out" , TORCH_FN(functionalization::_grid_sampler_2d_cpu_fallback_out_out)); |
22842 | m.impl("grid_sampler_3d_backward.out" , TORCH_FN(functionalization::grid_sampler_3d_backward_out_out)); |
22843 | m.impl("_fft_c2c.out" , TORCH_FN(functionalization::_fft_c2c_out_out)); |
22844 | m.impl("index_copy.out" , TORCH_FN(functionalization::index_copy_out_out)); |
22845 | m.impl("index_copy_" , TORCH_FN(functionalization::index_copy_)); |
22846 | m.impl("isin.Tensor_Tensor_out" , TORCH_FN(functionalization::isin_out_Tensor_Tensor_out)); |
22847 | m.impl("isin.Tensor_Scalar_out" , TORCH_FN(functionalization::isin_out_Tensor_Scalar_out)); |
22848 | m.impl("isin.Scalar_Tensor_out" , TORCH_FN(functionalization::isin_out_Scalar_Tensor_out)); |
22849 | m.impl("kron" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other)>(at::native::kron)); |
22850 | m.impl("kron.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & other, at::Tensor & out)>(at::native::kron_out)); |
22851 | m.impl("nan_to_num.out" , TORCH_FN(functionalization::nan_to_num_out_out)); |
22852 | m.impl("nan_to_num_" , TORCH_FN(functionalization::nan_to_num_)); |
22853 | m.impl("linear" , static_cast<at::Tensor (*)(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias)>(at::native::linear)); |
22854 | m.impl("linear.out" , TORCH_FN(functionalization::linear_out_out)); |
22855 | m.impl("mkldnn_linear.out" , TORCH_FN(functionalization::mkldnn_linear_out_out)); |
22856 | m.impl("linspace.out" , TORCH_FN(functionalization::linspace_out_out)); |
22857 | m.impl("log.out" , TORCH_FN(functionalization::log_out_out)); |
22858 | m.impl("log_" , TORCH_FN(functionalization::log_)); |
22859 | m.impl("log_softmax.int" , static_cast<at::Tensor (*)(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype)>(at::native::log_softmax)); |
22860 | m.impl("log_softmax.int_out" , TORCH_FN(functionalization::log_softmax_out_int_out)); |
22861 | m.impl("_log_softmax.out" , TORCH_FN(functionalization::_log_softmax_out_out)); |
22862 | m.impl("_log_softmax_backward_data.out" , TORCH_FN(functionalization::_log_softmax_backward_data_out_out)); |
22863 | m.impl("max.dim_max" , TORCH_FN(functionalization::max_out_dim_max)); |
22864 | m.impl("max.names_dim" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & self, at::Dimname dim, bool keepdim)>(at::native::max)); |
22865 | m.impl("max.names_dim_max" , static_cast<::std::tuple<at::Tensor &,at::Tensor &> (*)(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & max, at::Tensor & max_values)>(at::native::max_out)); |
22866 | m.impl("amax.out" , TORCH_FN(functionalization::amax_out_out)); |
22867 | m.impl("mkldnn_max_pool2d_backward.out" , TORCH_FN(functionalization::mkldnn_max_pool2d_backward_out_out)); |
22868 | m.impl("mkldnn_max_pool3d.out" , TORCH_FN(functionalization::mkldnn_max_pool3d_out_out)); |
22869 | m.impl("quantized_max_pool2d.out" , TORCH_FN(functionalization::quantized_max_pool2d_out_out)); |
22870 | m.impl("mean.out" , TORCH_FN(functionalization::mean_out_out)); |
22871 | m.impl("mean.names_dim" , static_cast<at::Tensor (*)(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype)>(at::native::mean)); |
22872 | m.impl("mean.names_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out)>(at::native::mean_out)); |
22873 | m.impl("nanmedian.out" , TORCH_FN(functionalization::nanmedian_out_out)); |
22874 | m.impl("nanmedian.dim_values" , TORCH_FN(functionalization::nanmedian_out_dim_values)); |
22875 | m.impl("nanmedian.names_dim" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & self, at::Dimname dim, bool keepdim)>(at::native::nanmedian)); |
22876 | m.impl("nanmedian.names_dim_values" , static_cast<::std::tuple<at::Tensor &,at::Tensor &> (*)(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices)>(at::native::nanmedian_out)); |
22877 | m.impl("mode.values" , TORCH_FN(functionalization::mode_out_values)); |
22878 | m.impl("mode.dimname" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & self, at::Dimname dim, bool keepdim)>(at::native::mode)); |
22879 | m.impl("mode.dimname_out" , static_cast<::std::tuple<at::Tensor &,at::Tensor &> (*)(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices)>(at::native::mode_out)); |
22880 | m.impl("multiply.Tensor" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other)>(at::native::multiply)); |
22881 | m.impl("multiply.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & other, at::Tensor & out)>(at::native::multiply_out)); |
22882 | m.impl("multiply_.Tensor" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Tensor & other)>(at::native::multiply_)); |
22883 | m.impl("narrow_copy.out" , TORCH_FN(functionalization::narrow_copy_out_out)); |
22884 | m.impl("batch_norm_gather_stats.out" , TORCH_FN(functionalization::batch_norm_gather_stats_out_out)); |
22885 | m.impl("batch_norm_gather_stats_with_counts.out" , TORCH_FN(functionalization::batch_norm_gather_stats_with_counts_out_out)); |
22886 | m.impl("native_batch_norm_backward.out" , TORCH_FN(functionalization::native_batch_norm_backward_out_out)); |
22887 | m.impl("ones.names_out" , TORCH_FN(functionalization::ones_out_names_out)); |
22888 | m.impl("ones.out" , TORCH_FN(functionalization::ones_out_out)); |
22889 | m.impl("_pdist_forward.out" , TORCH_FN(functionalization::_pdist_forward_out_out)); |
22890 | m.impl("_pdist_backward.out" , TORCH_FN(functionalization::_pdist_backward_out_out)); |
22891 | m.impl("pixel_shuffle.out" , TORCH_FN(functionalization::pixel_shuffle_out_out)); |
22892 | m.impl("_pin_memory.out" , TORCH_FN(functionalization::_pin_memory_out_out)); |
22893 | m.impl("randn.out" , static_cast<at::Tensor & (*)(at::IntArrayRef size, at::Tensor & out)>(at::native::randn_out)); |
22894 | m.impl("randn.generator_out" , static_cast<at::Tensor & (*)(at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out)>(at::native::randn_out)); |
22895 | m.impl("randn.names_out" , TORCH_FN(functionalization::randn_out_names_out)); |
22896 | m.impl("randn.generator_with_names_out" , TORCH_FN(functionalization::randn_out_generator_with_names_out)); |
22897 | m.impl("randn_like.out" , TORCH_FN(functionalization::randn_like_out_out)); |
22898 | m.impl("neg.out" , TORCH_FN(functionalization::neg_out_out)); |
22899 | m.impl("neg_" , TORCH_FN(functionalization::neg_)); |
22900 | m.impl("negative" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::negative)); |
22901 | m.impl("negative.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::Tensor & out)>(at::native::negative_out)); |
22902 | m.impl("negative_" , static_cast<at::Tensor & (*)(at::Tensor & self)>(at::native::negative_)); |
22903 | m.impl("repeat_interleave.Tensor_out" , TORCH_FN(functionalization::repeat_interleave_out_Tensor_out)); |
22904 | m.impl("gelu.out" , TORCH_FN(functionalization::gelu_out_out)); |
22905 | m.impl("gelu_" , TORCH_FN(functionalization::gelu_)); |
22906 | m.impl("select_backward.out" , TORCH_FN(functionalization::select_backward_out_out)); |
22907 | m.impl("mish.out" , TORCH_FN(functionalization::mish_out_out)); |
22908 | m.impl("mish_" , TORCH_FN(functionalization::mish_)); |
22909 | m.impl("slice_scatter.out" , TORCH_FN(functionalization::slice_scatter_out_out)); |
22910 | m.impl("diagonal_scatter.out" , TORCH_FN(functionalization::diagonal_scatter_out_out)); |
22911 | m.impl("_softmax_backward_data.out" , TORCH_FN(functionalization::_softmax_backward_data_out_out)); |
22912 | m.impl("unsafe_split.Tensor_out" , TORCH_FN(functionalization::unsafe_split_out_Tensor_out)); |
22913 | m.impl("unsafe_split_with_sizes.out" , TORCH_FN(functionalization::unsafe_split_with_sizes_out_out)); |
22914 | m.impl("square" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::square)); |
22915 | m.impl("square.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::Tensor & out)>(at::native::square_out)); |
22916 | m.impl("square_" , static_cast<at::Tensor & (*)(at::Tensor & self)>(at::native::square_)); |
22917 | m.impl("tanh.out" , TORCH_FN(functionalization::tanh_out_out)); |
22918 | m.impl("tanh_" , TORCH_FN(functionalization::tanh_)); |
22919 | m.impl("roll.out" , TORCH_FN(functionalization::roll_out_out)); |
22920 | m.impl("rot90.out" , TORCH_FN(functionalization::rot90_out_out)); |
22921 | m.impl("_trilinear.out" , TORCH_FN(functionalization::_trilinear_out_out)); |
22922 | m.impl("_unique.out" , TORCH_FN(functionalization::_unique_out_out)); |
22923 | m.impl("_unique2.out" , TORCH_FN(functionalization::_unique2_out_out)); |
22924 | m.impl("_weight_norm_interface.out" , TORCH_FN(functionalization::_weight_norm_interface_out_out)); |
22925 | m.impl("_efficientzerotensor.out" , TORCH_FN(functionalization::_efficientzerotensor_out_out)); |
22926 | m.impl("_standard_gamma.out" , TORCH_FN(functionalization::_standard_gamma_out_out)); |
22927 | m.impl("_dirichlet_grad.out" , TORCH_FN(functionalization::_dirichlet_grad_out_out)); |
22928 | m.impl("norm.ScalarOpt_dtype_out" , TORCH_FN(functionalization::norm_out_ScalarOpt_dtype_out)); |
22929 | m.impl("norm.Scalar_out" , TORCH_FN(functionalization::norm_out_Scalar_out)); |
22930 | m.impl("norm.dtype_out" , TORCH_FN(functionalization::norm_out_dtype_out)); |
22931 | m.impl("norm.out" , TORCH_FN(functionalization::norm_out_out)); |
22932 | m.impl("norm.names_ScalarOpt_dim_dtype" , static_cast<at::Tensor (*)(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype)>(at::native::norm)); |
22933 | m.impl("norm.names_dtype_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype, at::Tensor & out)>(at::native::norm_out)); |
22934 | m.impl("norm.names_ScalarOpt_dim" , static_cast<at::Tensor (*)(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim)>(at::native::norm)); |
22935 | m.impl("norm.names_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::Tensor & out)>(at::native::norm_out)); |
22936 | m.impl("frexp.Tensor_out" , TORCH_FN(functionalization::frexp_out_Tensor_out)); |
22937 | m.impl("frobenius_norm.dim" , static_cast<at::Tensor (*)(const at::Tensor & self, at::IntArrayRef dim, bool keepdim)>(at::native::frobenius_norm)); |
22938 | m.impl("frobenius_norm.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out)>(at::native::frobenius_norm_out)); |
22939 | m.impl("nuclear_norm" , static_cast<at::Tensor (*)(const at::Tensor & self, bool keepdim)>(at::native::nuclear_norm)); |
22940 | m.impl("nuclear_norm.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, bool keepdim, at::Tensor & out)>(at::native::nuclear_norm_out)); |
22941 | m.impl("nuclear_norm.dim" , static_cast<at::Tensor (*)(const at::Tensor & self, at::IntArrayRef dim, bool keepdim)>(at::native::nuclear_norm)); |
22942 | m.impl("nuclear_norm.dim_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out)>(at::native::nuclear_norm_out)); |
22943 | m.impl("subtract.Tensor" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha)>(at::native::subtract)); |
22944 | m.impl("subtract.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out)>(at::native::subtract_out)); |
22945 | m.impl("subtract_.Tensor" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha)>(at::native::subtract_)); |
22946 | m.impl("sparse_sampled_addmm.out" , TORCH_FN(functionalization::sparse_sampled_addmm_out_out)); |
22947 | m.impl("_addmm_activation.out" , TORCH_FN(functionalization::_addmm_activation_out_out)); |
22948 | m.impl("_to_dense.out" , TORCH_FN(functionalization::_to_dense_out_out)); |
22949 | m.impl("_coalesced.out" , TORCH_FN(functionalization::_coalesced_out_out)); |
22950 | m.impl("_coalesced_" , TORCH_FN(functionalization::_coalesced_)); |
22951 | m.impl("to_sparse_csr.out" , TORCH_FN(functionalization::to_sparse_csr_out_out)); |
22952 | m.impl("to_sparse_csc.out" , TORCH_FN(functionalization::to_sparse_csc_out_out)); |
22953 | m.impl("to_sparse_bsc.out" , TORCH_FN(functionalization::to_sparse_bsc_out_out)); |
22954 | m.impl("quantize_per_tensor_dynamic.out" , TORCH_FN(functionalization::quantize_per_tensor_dynamic_out_out)); |
22955 | m.impl("quantize_per_tensor.out" , TORCH_FN(functionalization::quantize_per_tensor_out_out)); |
22956 | m.impl("quantize_per_tensor.tensor_qparams_out" , TORCH_FN(functionalization::quantize_per_tensor_out_tensor_qparams_out)); |
22957 | m.impl("quantize_per_tensor.tensors_out" , TORCH_FN(functionalization::quantize_per_tensor_out_tensors_out)); |
22958 | m.impl("quantize_per_channel.out" , TORCH_FN(functionalization::quantize_per_channel_out_out)); |
22959 | m.impl("_make_per_channel_quantized_tensor.out" , TORCH_FN(functionalization::_make_per_channel_quantized_tensor_out_out)); |
22960 | m.impl("fake_quantize_per_tensor_affine_cachemask.out" , TORCH_FN(functionalization::fake_quantize_per_tensor_affine_cachemask_out_out)); |
22961 | m.impl("_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out" , TORCH_FN(functionalization::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out_out)); |
22962 | m.impl("_fake_quantize_learnable_per_tensor_affine.out" , TORCH_FN(functionalization::_fake_quantize_learnable_per_tensor_affine_out_out)); |
22963 | m.impl("fake_quantize_per_channel_affine_cachemask.out" , TORCH_FN(functionalization::fake_quantize_per_channel_affine_cachemask_out_out)); |
22964 | m.impl("lstm_mps_backward.out" , TORCH_FN(functionalization::lstm_mps_backward_out_out)); |
22965 | m.impl("lift_fresh_copy.out" , TORCH_FN(functionalization::lift_fresh_copy_out_out)); |
22966 | m.impl("_masked_softmax_backward.out" , TORCH_FN(functionalization::_masked_softmax_backward_out_out)); |
22967 | m.impl("put.out" , TORCH_FN(functionalization::put_out_out)); |
22968 | m.impl("put_" , TORCH_FN(functionalization::put_)); |
22969 | m.impl("index_add.out" , TORCH_FN(functionalization::index_add_out_out)); |
22970 | m.impl("index_add_" , TORCH_FN(functionalization::index_add_)); |
22971 | m.impl("index_fill.int_Scalar_out" , TORCH_FN(functionalization::index_fill_out_int_Scalar_out)); |
22972 | m.impl("index_fill_.int_Scalar" , TORCH_FN(functionalization::index_fill__int_Scalar)); |
22973 | m.impl("index_fill.int_Tensor_out" , TORCH_FN(functionalization::index_fill_out_int_Tensor_out)); |
22974 | m.impl("index_fill_.int_Tensor" , TORCH_FN(functionalization::index_fill__int_Tensor)); |
22975 | m.impl("scatter.src_out" , TORCH_FN(functionalization::scatter_out_src_out)); |
22976 | m.impl("scatter_.src" , TORCH_FN(functionalization::scatter__src)); |
22977 | m.impl("scatter.value_out" , TORCH_FN(functionalization::scatter_out_value_out)); |
22978 | m.impl("scatter_.value" , TORCH_FN(functionalization::scatter__value)); |
22979 | m.impl("scatter.reduce_out" , TORCH_FN(functionalization::scatter_out_reduce_out)); |
22980 | m.impl("scatter_.reduce" , TORCH_FN(functionalization::scatter__reduce)); |
22981 | m.impl("scatter.value_reduce_out" , TORCH_FN(functionalization::scatter_out_value_reduce_out)); |
22982 | m.impl("scatter_.value_reduce" , TORCH_FN(functionalization::scatter__value_reduce)); |
22983 | m.impl("scatter_add.out" , TORCH_FN(functionalization::scatter_add_out_out)); |
22984 | m.impl("scatter_add_" , TORCH_FN(functionalization::scatter_add_)); |
22985 | m.impl("__lshift__.Scalar_out" , TORCH_FN(functionalization::__lshift___out_Scalar_out)); |
22986 | m.impl("__ilshift__.Scalar" , TORCH_FN(functionalization::__ilshift___Scalar)); |
22987 | m.impl("__lshift__.Tensor_out" , TORCH_FN(functionalization::__lshift___out_Tensor_out)); |
22988 | m.impl("__ilshift__.Tensor" , TORCH_FN(functionalization::__ilshift___Tensor)); |
22989 | m.impl("greater_equal.Scalar" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Scalar & other)>(at::native::greater_equal)); |
22990 | m.impl("greater_equal.Scalar_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Scalar & other, at::Tensor & out)>(at::native::greater_equal_out)); |
22991 | m.impl("greater_equal_.Scalar" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Scalar & other)>(at::native::greater_equal_)); |
22992 | m.impl("greater_equal.Tensor" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other)>(at::native::greater_equal)); |
22993 | m.impl("greater_equal.Tensor_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & other, at::Tensor & out)>(at::native::greater_equal_out)); |
22994 | m.impl("greater_equal_.Tensor" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Tensor & other)>(at::native::greater_equal_)); |
22995 | m.impl("less_equal.Scalar" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Scalar & other)>(at::native::less_equal)); |
22996 | m.impl("less_equal.Scalar_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Scalar & other, at::Tensor & out)>(at::native::less_equal_out)); |
22997 | m.impl("less_equal_.Scalar" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Scalar & other)>(at::native::less_equal_)); |
22998 | m.impl("less_equal.Tensor" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other)>(at::native::less_equal)); |
22999 | m.impl("less_equal.Tensor_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & other, at::Tensor & out)>(at::native::less_equal_out)); |
23000 | m.impl("less_equal_.Tensor" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Tensor & other)>(at::native::less_equal_)); |
23001 | m.impl("greater.Scalar" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Scalar & other)>(at::native::greater)); |
23002 | m.impl("greater.Scalar_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Scalar & other, at::Tensor & out)>(at::native::greater_out)); |
23003 | m.impl("greater_.Scalar" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Scalar & other)>(at::native::greater_)); |
23004 | m.impl("greater.Tensor" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other)>(at::native::greater)); |
23005 | m.impl("greater.Tensor_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & other, at::Tensor & out)>(at::native::greater_out)); |
23006 | m.impl("greater_.Tensor" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Tensor & other)>(at::native::greater_)); |
23007 | m.impl("lt.Scalar_out" , TORCH_FN(functionalization::lt_out_Scalar_out)); |
23008 | m.impl("lt_.Scalar" , TORCH_FN(functionalization::lt__Scalar)); |
23009 | m.impl("lt.Tensor_out" , TORCH_FN(functionalization::lt_out_Tensor_out)); |
23010 | m.impl("lt_.Tensor" , TORCH_FN(functionalization::lt__Tensor)); |
23011 | m.impl("less.Scalar" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Scalar & other)>(at::native::less)); |
23012 | m.impl("less.Scalar_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Scalar & other, at::Tensor & out)>(at::native::less_out)); |
23013 | m.impl("less_.Scalar" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Scalar & other)>(at::native::less_)); |
23014 | m.impl("less.Tensor" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other)>(at::native::less)); |
23015 | m.impl("less.Tensor_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & other, at::Tensor & out)>(at::native::less_out)); |
23016 | m.impl("less_.Tensor" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Tensor & other)>(at::native::less_)); |
23017 | m.impl("take.out" , TORCH_FN(functionalization::take_out_out)); |
23018 | m.impl("gather.out" , TORCH_FN(functionalization::gather_out_out)); |
23019 | m.impl("gather.dimname" , static_cast<at::Tensor (*)(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad)>(at::native::gather)); |
23020 | m.impl("gather.dimname_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out)>(at::native::gather_out)); |
23021 | m.impl("cholesky.out" , TORCH_FN(functionalization::cholesky_out_out)); |
23022 | m.impl("_cholesky_solve_helper.out" , TORCH_FN(functionalization::_cholesky_solve_helper_out_out)); |
23023 | m.impl("polygamma.out" , TORCH_FN(functionalization::polygamma_out_out)); |
23024 | m.impl("igamma.out" , TORCH_FN(functionalization::igamma_out_out)); |
23025 | m.impl("igamma_" , TORCH_FN(functionalization::igamma_)); |
23026 | m.impl("fmin.out" , TORCH_FN(functionalization::fmin_out_out)); |
23027 | m.impl("max.unary_out" , TORCH_FN(functionalization::max_out_unary_out)); |
23028 | m.impl("fmax.out" , TORCH_FN(functionalization::fmax_out_out)); |
23029 | m.impl("maximum.out" , TORCH_FN(functionalization::maximum_out_out)); |
23030 | m.impl("max.other" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other)>(at::native::max)); |
23031 | m.impl("max.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & other, at::Tensor & out)>(at::native::max_out)); |
23032 | m.impl("_amp_foreach_non_finite_check_and_unscale.out" , TORCH_FN(functionalization::_amp_foreach_non_finite_check_and_unscale_out_out)); |
23033 | m.impl("_amp_foreach_non_finite_check_and_unscale_" , TORCH_FN(functionalization::_amp_foreach_non_finite_check_and_unscale_)); |
23034 | m.impl("_foreach_sub.Scalar_out" , TORCH_FN(functionalization::_foreach_sub_out_Scalar_out)); |
23035 | m.impl("_foreach_sub_.Scalar" , TORCH_FN(functionalization::_foreach_sub__Scalar)); |
23036 | m.impl("_foreach_maximum.Scalar_out" , TORCH_FN(functionalization::_foreach_maximum_out_Scalar_out)); |
23037 | m.impl("_foreach_maximum_.Scalar" , TORCH_FN(functionalization::_foreach_maximum__Scalar)); |
23038 | m.impl("_foreach_sub.List_out" , TORCH_FN(functionalization::_foreach_sub_out_List_out)); |
23039 | m.impl("_foreach_sub_.List" , TORCH_FN(functionalization::_foreach_sub__List)); |
23040 | m.impl("_foreach_maximum.List_out" , TORCH_FN(functionalization::_foreach_maximum_out_List_out)); |
23041 | m.impl("_foreach_maximum_.List" , TORCH_FN(functionalization::_foreach_maximum__List)); |
23042 | m.impl("_foreach_sub.ScalarList_out" , TORCH_FN(functionalization::_foreach_sub_out_ScalarList_out)); |
23043 | m.impl("_foreach_sub_.ScalarList" , TORCH_FN(functionalization::_foreach_sub__ScalarList)); |
23044 | m.impl("_foreach_maximum.ScalarList_out" , TORCH_FN(functionalization::_foreach_maximum_out_ScalarList_out)); |
23045 | m.impl("_foreach_maximum_.ScalarList" , TORCH_FN(functionalization::_foreach_maximum__ScalarList)); |
23046 | m.impl("_foreach_exp.out" , TORCH_FN(functionalization::_foreach_exp_out_out)); |
23047 | m.impl("_foreach_exp_" , TORCH_FN(functionalization::_foreach_exp_)); |
23048 | m.impl("_foreach_sqrt.out" , TORCH_FN(functionalization::_foreach_sqrt_out_out)); |
23049 | m.impl("_foreach_sqrt_" , TORCH_FN(functionalization::_foreach_sqrt_)); |
23050 | m.impl("_foreach_abs.out" , TORCH_FN(functionalization::_foreach_abs_out_out)); |
23051 | m.impl("_foreach_abs_" , TORCH_FN(functionalization::_foreach_abs_)); |
23052 | m.impl("_foreach_acos.out" , TORCH_FN(functionalization::_foreach_acos_out_out)); |
23053 | m.impl("_foreach_acos_" , TORCH_FN(functionalization::_foreach_acos_)); |
23054 | m.impl("_foreach_cos.out" , TORCH_FN(functionalization::_foreach_cos_out_out)); |
23055 | m.impl("_foreach_cos_" , TORCH_FN(functionalization::_foreach_cos_)); |
23056 | m.impl("_foreach_floor.out" , TORCH_FN(functionalization::_foreach_floor_out_out)); |
23057 | m.impl("_foreach_floor_" , TORCH_FN(functionalization::_foreach_floor_)); |
23058 | m.impl("_foreach_log10.out" , TORCH_FN(functionalization::_foreach_log10_out_out)); |
23059 | m.impl("_foreach_log10_" , TORCH_FN(functionalization::_foreach_log10_)); |
23060 | m.impl("_foreach_neg.out" , TORCH_FN(functionalization::_foreach_neg_out_out)); |
23061 | m.impl("_foreach_neg_" , TORCH_FN(functionalization::_foreach_neg_)); |
23062 | m.impl("_foreach_tan.out" , TORCH_FN(functionalization::_foreach_tan_out_out)); |
23063 | m.impl("_foreach_tan_" , TORCH_FN(functionalization::_foreach_tan_)); |
23064 | m.impl("_foreach_sigmoid.out" , TORCH_FN(functionalization::_foreach_sigmoid_out_out)); |
23065 | m.impl("_foreach_sigmoid_" , TORCH_FN(functionalization::_foreach_sigmoid_)); |
23066 | m.impl("_foreach_norm.Scalar_out" , TORCH_FN(functionalization::_foreach_norm_out_Scalar_out)); |
23067 | m.impl("searchsorted.Tensor_out" , TORCH_FN(functionalization::searchsorted_out_Tensor_out)); |
23068 | m.impl("searchsorted.Scalar_out" , TORCH_FN(functionalization::searchsorted_out_Scalar_out)); |
23069 | m.impl("mse_loss_backward.grad_input" , TORCH_FN(functionalization::mse_loss_backward_out_grad_input)); |
23070 | m.impl("smooth_l1_loss_backward.grad_input" , TORCH_FN(functionalization::smooth_l1_loss_backward_out_grad_input)); |
23071 | m.impl("huber_loss_backward.out" , TORCH_FN(functionalization::huber_loss_backward_out_out)); |
23072 | m.impl("elu_backward.grad_input" , TORCH_FN(functionalization::elu_backward_out_grad_input)); |
23073 | m.impl("glu_jvp.out" , TORCH_FN(functionalization::glu_jvp_out_out)); |
23074 | m.impl("hardsigmoid_backward.grad_input" , TORCH_FN(functionalization::hardsigmoid_backward_out_grad_input)); |
23075 | m.impl("log_sigmoid" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::log_sigmoid)); |
23076 | m.impl("log_sigmoid.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::Tensor & out)>(at::native::log_sigmoid_out)); |
23077 | m.impl("log_sigmoid_forward.output" , TORCH_FN(functionalization::log_sigmoid_forward_out_output)); |
23078 | m.impl("rrelu_with_noise.out" , TORCH_FN(functionalization::rrelu_with_noise_out_out)); |
23079 | m.impl("rrelu_with_noise_" , TORCH_FN(functionalization::rrelu_with_noise_)); |
23080 | m.impl("rrelu_with_noise_backward.out" , TORCH_FN(functionalization::rrelu_with_noise_backward_out_out)); |
23081 | m.impl("softplus_backward.grad_input" , TORCH_FN(functionalization::softplus_backward_out_grad_input)); |
23082 | m.impl("mkldnn_adaptive_avg_pool2d.out" , TORCH_FN(functionalization::mkldnn_adaptive_avg_pool2d_out_out)); |
23083 | m.impl("adaptive_max_pool2d.out" , TORCH_FN(functionalization::adaptive_max_pool2d_out_out)); |
23084 | m.impl("adaptive_max_pool3d.out" , TORCH_FN(functionalization::adaptive_max_pool3d_out_out)); |
23085 | m.impl("avg_pool2d_backward.grad_input" , TORCH_FN(functionalization::avg_pool2d_backward_out_grad_input)); |
23086 | m.impl("max_pool2d_with_indices.out" , TORCH_FN(functionalization::max_pool2d_with_indices_out_out)); |
23087 | m.impl("upsample_linear1d.out" , TORCH_FN(functionalization::upsample_linear1d_out_out)); |
23088 | m.impl("upsample_linear1d_backward.grad_input" , TORCH_FN(functionalization::upsample_linear1d_backward_out_grad_input)); |
23089 | m.impl("upsample_bicubic2d.out" , TORCH_FN(functionalization::upsample_bicubic2d_out_out)); |
23090 | m.impl("upsample_bicubic2d_backward.grad_input" , TORCH_FN(functionalization::upsample_bicubic2d_backward_out_grad_input)); |
23091 | m.impl("_upsample_bicubic2d_aa_backward.grad_input" , TORCH_FN(functionalization::_upsample_bicubic2d_aa_backward_out_grad_input)); |
23092 | m.impl("upsample_nearest1d.out" , TORCH_FN(functionalization::upsample_nearest1d_out_out)); |
23093 | m.impl("_upsample_nearest_exact1d.out" , TORCH_FN(functionalization::_upsample_nearest_exact1d_out_out)); |
23094 | m.impl("upsample_nearest1d_backward.grad_input" , TORCH_FN(functionalization::upsample_nearest1d_backward_out_grad_input)); |
23095 | m.impl("upsample_nearest3d.out" , TORCH_FN(functionalization::upsample_nearest3d_out_out)); |
23096 | m.impl("slow_conv_transpose3d.out" , TORCH_FN(functionalization::slow_conv_transpose3d_out_out)); |
23097 | m.impl("isposinf.out" , TORCH_FN(functionalization::isposinf_out_out)); |
23098 | m.impl("special_entr.out" , TORCH_FN(functionalization::special_entr_out_out)); |
23099 | m.impl("special_psi" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::special_psi)); |
23100 | m.impl("special_psi.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::Tensor & out)>(at::native::special_psi_out)); |
23101 | m.impl("special_erfinv" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::special_erfinv)); |
23102 | m.impl("special_erfinv.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::Tensor & out)>(at::native::special_erfinv_out)); |
23103 | m.impl("special_ndtr" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::special_ndtr)); |
23104 | m.impl("special_ndtr.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::Tensor & out)>(at::native::special_ndtr_out)); |
23105 | m.impl("special_xlogy" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other)>(at::native::special_xlogy)); |
23106 | m.impl("special_xlogy.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & other, at::Tensor & out)>(at::native::special_xlogy_out)); |
23107 | m.impl("special_xlogy.self_scalar" , static_cast<at::Tensor (*)(const at::Scalar & self, const at::Tensor & other)>(at::native::special_xlogy)); |
23108 | m.impl("special_xlogy.self_scalar_out" , static_cast<at::Tensor & (*)(const at::Scalar & self, const at::Tensor & other, at::Tensor & out)>(at::native::special_xlogy_out)); |
23109 | m.impl("special_xlogy.other_scalar" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Scalar & other)>(at::native::special_xlogy)); |
23110 | m.impl("special_xlogy.other_scalar_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Scalar & other, at::Tensor & out)>(at::native::special_xlogy_out)); |
23111 | m.impl("special_zeta.out" , TORCH_FN(functionalization::special_zeta_out_out)); |
23112 | m.impl("special_zeta.self_scalar_out" , TORCH_FN(functionalization::special_zeta_out_self_scalar_out)); |
23113 | m.impl("special_zeta.other_scalar_out" , TORCH_FN(functionalization::special_zeta_out_other_scalar_out)); |
23114 | m.impl("special_i0" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::special_i0)); |
23115 | m.impl("special_i0.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::Tensor & out)>(at::native::special_i0_out)); |
23116 | m.impl("special_i0e.out" , TORCH_FN(functionalization::special_i0e_out_out)); |
23117 | m.impl("special_expit" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::special_expit)); |
23118 | m.impl("special_expit.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::Tensor & out)>(at::native::special_expit_out)); |
23119 | m.impl("special_round" , static_cast<at::Tensor (*)(const at::Tensor & self, int64_t decimals)>(at::native::special_round)); |
23120 | m.impl("special_round.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, int64_t decimals, at::Tensor & out)>(at::native::special_round_out)); |
23121 | m.impl("special_gammainc" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other)>(at::native::special_gammainc)); |
23122 | m.impl("special_gammainc.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & other, at::Tensor & out)>(at::native::special_gammainc_out)); |
23123 | m.impl("fft_irfft" , static_cast<at::Tensor (*)(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm)>(at::native::fft_irfft)); |
23124 | m.impl("fft_irfft.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out)>(at::native::fft_irfft_out)); |
23125 | m.impl("fft_fft2" , static_cast<at::Tensor (*)(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm)>(at::native::fft_fft2)); |
23126 | m.impl("fft_fft2.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out)>(at::native::fft_fft2_out)); |
23127 | m.impl("fft_rfftfreq.out" , TORCH_FN(functionalization::fft_rfftfreq_out_out)); |
23128 | m.impl("linalg_cholesky" , static_cast<at::Tensor (*)(const at::Tensor & self, bool upper)>(at::native::linalg_cholesky)); |
23129 | m.impl("linalg_cholesky.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, bool upper, at::Tensor & out)>(at::native::linalg_cholesky_out)); |
23130 | m.impl("linalg_lu.out" , TORCH_FN(functionalization::linalg_lu_out_out)); |
23131 | m.impl("_linalg_det.result" , TORCH_FN(functionalization::_linalg_det_out_result)); |
23132 | m.impl("slogdet" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & self)>(at::native::slogdet)); |
23133 | m.impl("slogdet.out" , static_cast<::std::tuple<at::Tensor &,at::Tensor &> (*)(const at::Tensor & self, at::Tensor & sign, at::Tensor & logabsdet)>(at::native::slogdet_out)); |
23134 | m.impl("linalg_eig.out" , TORCH_FN(functionalization::linalg_eig_out_out)); |
23135 | m.impl("linalg_inv" , static_cast<at::Tensor (*)(const at::Tensor & A)>(at::native::linalg_inv)); |
23136 | m.impl("linalg_inv.out" , static_cast<at::Tensor & (*)(const at::Tensor & A, at::Tensor & out)>(at::native::linalg_inv_out)); |
23137 | m.impl("_linalg_svd.U" , TORCH_FN(functionalization::_linalg_svd_out_U)); |
23138 | m.impl("linalg_svdvals" , static_cast<at::Tensor (*)(const at::Tensor & A, c10::optional<c10::string_view> driver)>(at::native::linalg_svdvals)); |
23139 | m.impl("linalg_svdvals.out" , static_cast<at::Tensor & (*)(const at::Tensor & A, c10::optional<c10::string_view> driver, at::Tensor & out)>(at::native::linalg_svdvals_out)); |
23140 | m.impl("linalg_pinv.atol_rtol_tensor_out" , TORCH_FN(functionalization::linalg_pinv_out_atol_rtol_tensor_out)); |
23141 | m.impl("linalg_pinv.atol_rtol_float" , static_cast<at::Tensor (*)(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian)>(at::native::linalg_pinv)); |
23142 | m.impl("linalg_pinv.atol_rtol_float_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian, at::Tensor & out)>(at::native::linalg_pinv_out)); |
23143 | m.impl("linalg_pinv" , static_cast<at::Tensor (*)(const at::Tensor & self, double rcond, bool hermitian)>(at::native::linalg_pinv)); |
23144 | m.impl("linalg_pinv.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, double rcond, bool hermitian, at::Tensor & out)>(at::native::linalg_pinv_out)); |
23145 | m.impl("linalg_pinv.rcond_tensor" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & rcond, bool hermitian)>(at::native::linalg_pinv)); |
23146 | m.impl("linalg_pinv.out_rcond_tensor" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & rcond, bool hermitian, at::Tensor & out)>(at::native::linalg_pinv_out)); |
23147 | m.impl("linalg_qr.out" , TORCH_FN(functionalization::linalg_qr_out_out)); |
23148 | m.impl("_test_optional_intlist.out" , TORCH_FN(functionalization::_test_optional_intlist_out_out)); |
23149 | m.impl("_fw_primal_copy.out" , TORCH_FN(functionalization::_fw_primal_copy_out_out)); |
23150 | m.impl("as_strided_copy.out" , TORCH_FN(functionalization::as_strided_copy_out_out)); |
23151 | m.impl("expand_copy.out" , TORCH_FN(functionalization::expand_copy_out_out)); |
23152 | m.impl("_reshape_alias_copy.out" , TORCH_FN(functionalization::_reshape_alias_copy_out_out)); |
23153 | m.impl("select_copy.int_out" , TORCH_FN(functionalization::select_copy_out_int_out)); |
23154 | m.impl("split_with_sizes_copy.out" , TORCH_FN(functionalization::split_with_sizes_copy_out_out)); |
23155 | m.impl("squeeze_copy.out" , TORCH_FN(functionalization::squeeze_copy_out_out)); |
23156 | m.impl("squeeze_copy.dim_out" , TORCH_FN(functionalization::squeeze_copy_out_dim_out)); |
23157 | m.impl("squeeze_copy.dims_out" , TORCH_FN(functionalization::squeeze_copy_out_dims_out)); |
23158 | m.impl("_indices_copy.out" , TORCH_FN(functionalization::_indices_copy_out_out)); |
23159 | m.impl("_values_copy.out" , TORCH_FN(functionalization::_values_copy_out_out)); |
23160 | m.impl("crow_indices_copy.out" , TORCH_FN(functionalization::crow_indices_copy_out_out)); |
23161 | m.impl("col_indices_copy.out" , TORCH_FN(functionalization::col_indices_copy_out_out)); |
23162 | m.impl("unbind_copy.int_out" , TORCH_FN(functionalization::unbind_copy_out_int_out)); |
23163 | m.impl("view_copy.out" , TORCH_FN(functionalization::view_copy_out_out)); |
23164 | m.impl("view_copy.dtype_out" , TORCH_FN(functionalization::view_copy_out_dtype_out)); |
23165 | m.impl("alias_copy.out" , TORCH_FN(functionalization::alias_copy_out_out)); |
23166 | m.impl("special_airy_ai.out" , TORCH_FN(functionalization::special_airy_ai_out_out)); |
23167 | m.impl("special_bessel_j0.out" , TORCH_FN(functionalization::special_bessel_j0_out_out)); |
23168 | m.impl("special_chebyshev_polynomial_v.out" , TORCH_FN(functionalization::special_chebyshev_polynomial_v_out_out)); |
23169 | m.impl("special_chebyshev_polynomial_v.x_scalar" , static_cast<at::Tensor (*)(const at::Scalar & x, const at::Tensor & n)>(at::native::special_chebyshev_polynomial_v)); |
23170 | m.impl("special_chebyshev_polynomial_v.x_scalar_out" , static_cast<at::Tensor & (*)(const at::Scalar & x, const at::Tensor & n, at::Tensor & out)>(at::native::special_chebyshev_polynomial_v_out)); |
23171 | m.impl("special_chebyshev_polynomial_v.n_scalar" , static_cast<at::Tensor (*)(const at::Tensor & x, const at::Scalar & n)>(at::native::special_chebyshev_polynomial_v)); |
23172 | m.impl("special_chebyshev_polynomial_v.n_scalar_out" , TORCH_FN(functionalization::special_chebyshev_polynomial_v_out_n_scalar_out)); |
23173 | m.impl("special_chebyshev_polynomial_w.out" , TORCH_FN(functionalization::special_chebyshev_polynomial_w_out_out)); |
23174 | m.impl("special_chebyshev_polynomial_w.x_scalar" , static_cast<at::Tensor (*)(const at::Scalar & x, const at::Tensor & n)>(at::native::special_chebyshev_polynomial_w)); |
23175 | m.impl("special_chebyshev_polynomial_w.x_scalar_out" , static_cast<at::Tensor & (*)(const at::Scalar & x, const at::Tensor & n, at::Tensor & out)>(at::native::special_chebyshev_polynomial_w_out)); |
23176 | m.impl("special_chebyshev_polynomial_w.n_scalar" , static_cast<at::Tensor (*)(const at::Tensor & x, const at::Scalar & n)>(at::native::special_chebyshev_polynomial_w)); |
23177 | m.impl("special_chebyshev_polynomial_w.n_scalar_out" , TORCH_FN(functionalization::special_chebyshev_polynomial_w_out_n_scalar_out)); |
23178 | m.impl("special_hermite_polynomial_he.out" , TORCH_FN(functionalization::special_hermite_polynomial_he_out_out)); |
23179 | m.impl("special_hermite_polynomial_he.x_scalar" , static_cast<at::Tensor (*)(const at::Scalar & x, const at::Tensor & n)>(at::native::special_hermite_polynomial_he)); |
23180 | m.impl("special_hermite_polynomial_he.x_scalar_out" , static_cast<at::Tensor & (*)(const at::Scalar & x, const at::Tensor & n, at::Tensor & out)>(at::native::special_hermite_polynomial_he_out)); |
23181 | m.impl("special_hermite_polynomial_he.n_scalar" , static_cast<at::Tensor (*)(const at::Tensor & x, const at::Scalar & n)>(at::native::special_hermite_polynomial_he)); |
23182 | m.impl("special_hermite_polynomial_he.n_scalar_out" , TORCH_FN(functionalization::special_hermite_polynomial_he_out_n_scalar_out)); |
23183 | m.impl("special_laguerre_polynomial_l.out" , TORCH_FN(functionalization::special_laguerre_polynomial_l_out_out)); |
23184 | m.impl("special_laguerre_polynomial_l.x_scalar" , static_cast<at::Tensor (*)(const at::Scalar & x, const at::Tensor & n)>(at::native::special_laguerre_polynomial_l)); |
23185 | m.impl("special_laguerre_polynomial_l.x_scalar_out" , static_cast<at::Tensor & (*)(const at::Scalar & x, const at::Tensor & n, at::Tensor & out)>(at::native::special_laguerre_polynomial_l_out)); |
23186 | m.impl("special_laguerre_polynomial_l.n_scalar" , static_cast<at::Tensor (*)(const at::Tensor & x, const at::Scalar & n)>(at::native::special_laguerre_polynomial_l)); |
23187 | m.impl("special_laguerre_polynomial_l.n_scalar_out" , TORCH_FN(functionalization::special_laguerre_polynomial_l_out_n_scalar_out)); |
23188 | m.impl("special_shifted_chebyshev_polynomial_v.out" , TORCH_FN(functionalization::special_shifted_chebyshev_polynomial_v_out_out)); |
23189 | m.impl("special_shifted_chebyshev_polynomial_v.x_scalar" , static_cast<at::Tensor (*)(const at::Scalar & x, const at::Tensor & n)>(at::native::special_shifted_chebyshev_polynomial_v)); |
23190 | m.impl("special_shifted_chebyshev_polynomial_v.x_scalar_out" , static_cast<at::Tensor & (*)(const at::Scalar & x, const at::Tensor & n, at::Tensor & out)>(at::native::special_shifted_chebyshev_polynomial_v_out)); |
23191 | m.impl("special_shifted_chebyshev_polynomial_v.n_scalar" , static_cast<at::Tensor (*)(const at::Tensor & x, const at::Scalar & n)>(at::native::special_shifted_chebyshev_polynomial_v)); |
23192 | m.impl("special_shifted_chebyshev_polynomial_v.n_scalar_out" , TORCH_FN(functionalization::special_shifted_chebyshev_polynomial_v_out_n_scalar_out)); |
23193 | m.impl("_fused_adamw.out" , TORCH_FN(functionalization::_fused_adamw_out_out)); |
23194 | m.impl("_fused_adamw_" , TORCH_FN(functionalization::_fused_adamw_)); |
23195 | m.impl("_fw_primal" , TORCH_FN(functionalization::_fw_primal)); |
23196 | m.impl("_make_dual" , TORCH_FN(functionalization::_make_dual)); |
23197 | m.impl("_unpack_dual" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & dual, int64_t level)>(at::native::_unpack_dual)); |
23198 | m.impl("align_to" , static_cast<at::Tensor (*)(const at::Tensor & self, at::DimnameList names)>(at::native::align_to)); |
23199 | m.impl("align_to.ellipsis_idx" , static_cast<at::Tensor (*)(const at::Tensor & self, at::DimnameList order, int64_t ellipsis_idx)>(at::native::align_to)); |
23200 | m.impl("view_as_complex" , TORCH_FN(functionalization::view_as_complex)); |
23201 | m.impl("resolve_neg" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::resolve_neg)); |
23202 | m.impl("linalg_diagonal" , static_cast<at::Tensor (*)(const at::Tensor & A, int64_t offset, int64_t dim1, int64_t dim2)>(at::native::linalg_diagonal)); |
23203 | m.impl("expand" , TORCH_FN(functionalization::expand)); |
23204 | m.impl("matrix_H" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::matrix_H)); |
23205 | m.impl("adjoint" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::adjoint)); |
23206 | m.impl("reshape_as" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other)>(at::native::reshape_as)); |
23207 | m.impl("transpose.int" , TORCH_FN(functionalization::transpose_int)); |
23208 | m.impl("transpose_" , TORCH_FN(functionalization::transpose_)); |
23209 | m.impl("transpose.Dimname" , static_cast<at::Tensor (*)(const at::Tensor & self, at::Dimname dim0, at::Dimname dim1)>(at::native::transpose)); |
23210 | m.impl("_nested_view_from_buffer" , TORCH_FN(functionalization::_nested_view_from_buffer)); |
23211 | m.impl("unsqueeze" , TORCH_FN(functionalization::unsqueeze)); |
23212 | m.impl("unsqueeze_" , TORCH_FN(functionalization::unsqueeze_)); |
23213 | m.impl("_values" , TORCH_FN(functionalization::_values)); |
23214 | m.impl("ccol_indices" , TORCH_FN(functionalization::ccol_indices)); |
23215 | m.impl("_autocast_to_full_precision" , static_cast<at::Tensor (*)(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled)>(at::native::_autocast_to_full_precision)); |
23216 | m.impl("to.dtype_layout" , static_cast<at::Tensor (*)(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format)>(at::native::to)); |
23217 | m.impl("to.device" , static_cast<at::Tensor (*)(const at::Tensor & self, at::Device device, at::ScalarType dtype, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format)>(at::native::to)); |
23218 | m.impl("to.dtype" , static_cast<at::Tensor (*)(const at::Tensor & self, at::ScalarType dtype, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format)>(at::native::to)); |
23219 | m.impl("to.other" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format)>(at::native::to)); |
23220 | m.impl("view" , TORCH_FN(functionalization::view)); |
23221 | m.impl("view.dtype" , TORCH_FN(functionalization::view_dtype)); |
23222 | m.impl("_cast_Long" , static_cast<at::Tensor (*)(const at::Tensor & self, bool non_blocking)>(at::native::_cast_Long)); |
23223 | m.impl("_version" , static_cast<int64_t (*)(const at::Tensor & self)>(at::native::_version)); |
23224 | m.impl("retain_grad" , static_cast<void (*)(at::Tensor & self)>(at::native::retain_grad)); |
23225 | m.impl("retains_grad" , static_cast<bool (*)(const at::Tensor & self)>(at::native::retains_grad)); |
23226 | m.impl("align_tensors" , static_cast<::std::vector<at::Tensor> (*)(at::TensorList tensors)>(at::native::align_tensors)); |
23227 | m.impl("_assert_tensor_metadata" , static_cast<void (*)(const at::Tensor & a, at::OptionalIntArrayRef size, at::OptionalIntArrayRef stride, c10::optional<at::ScalarType> dtype)>(at::native::_assert_tensor_metadata)); |
23228 | m.impl("_debug_has_internal_overlap" , static_cast<int64_t (*)(const at::Tensor & self)>(at::native::_debug_has_internal_overlap)); |
23229 | m.impl("_sobol_engine_draw" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & quasi, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated, c10::optional<at::ScalarType> dtype)>(at::native::_sobol_engine_draw)); |
23230 | m.impl("_sobol_engine_scramble_" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Tensor & ltm, int64_t dimension)>(at::native::_sobol_engine_scramble_)); |
23231 | m.impl("feature_dropout" , static_cast<at::Tensor (*)(const at::Tensor & input, double p, bool train)>(at::native::feature_dropout)); |
23232 | m.impl("feature_dropout_" , static_cast<at::Tensor & (*)(at::Tensor & self, double p, bool train)>(at::native::feature_dropout_)); |
23233 | m.impl("alpha_dropout" , static_cast<at::Tensor (*)(const at::Tensor & input, double p, bool train)>(at::native::alpha_dropout)); |
23234 | m.impl("alpha_dropout_" , static_cast<at::Tensor & (*)(at::Tensor & self, double p, bool train)>(at::native::alpha_dropout_)); |
23235 | m.impl("chalf" , static_cast<at::Tensor (*)(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format)>(at::native::chalf)); |
23236 | m.impl("avg_pool1d" , static_cast<at::Tensor (*)(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad)>(at::native::avg_pool1d)); |
23237 | m.impl("adaptive_avg_pool1d" , static_cast<at::Tensor (*)(const at::Tensor & self, at::IntArrayRef output_size)>(at::native::adaptive_avg_pool1d)); |
23238 | m.impl("affine_grid_generator_backward" , static_cast<at::Tensor (*)(const at::Tensor & grad, at::IntArrayRef size, bool align_corners)>(at::native::affine_grid_generator_backward)); |
23239 | m.impl("_test_check_tensor" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::_test_check_tensor)); |
23240 | m.impl("atleast_3d" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::atleast_3d)); |
23241 | m.impl("atleast_3d.Sequence" , static_cast<::std::vector<at::Tensor> (*)(at::TensorList tensors)>(at::native::atleast_3d)); |
23242 | m.impl("batch_norm" , static_cast<at::Tensor (*)(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled)>(at::native::batch_norm)); |
23243 | m.impl("broadcast_tensors" , static_cast<::std::vector<at::Tensor> (*)(at::TensorList tensors)>(at::native::broadcast_tensors)); |
23244 | m.impl("_convolution_mode" , static_cast<at::Tensor (*)(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups)>(at::native::_convolution_mode)); |
23245 | m.impl("conv3d" , static_cast<at::Tensor (*)(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups)>(at::native::conv3d)); |
23246 | m.impl("conv3d.padding" , static_cast<at::Tensor (*)(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups)>(at::native::conv3d)); |
23247 | m.impl("diagflat" , static_cast<at::Tensor (*)(const at::Tensor & self, int64_t offset)>(at::native::diagflat)); |
23248 | m.impl("fill_diagonal_" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Scalar & fill_value, bool wrap)>(at::native::fill_diagonal_)); |
23249 | m.impl("index_copy_.dimname" , static_cast<at::Tensor & (*)(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source)>(at::native::index_copy_)); |
23250 | m.impl("index_copy.dimname" , static_cast<at::Tensor (*)(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source)>(at::native::index_copy)); |
23251 | m.impl("instance_norm" , static_cast<at::Tensor (*)(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled)>(at::native::instance_norm)); |
23252 | m.impl("is_complex" , static_cast<bool (*)(const at::Tensor & self)>(at::native::is_complex)); |
23253 | m.impl("fbgemm_linear_int8_weight_fp32_activation" , static_cast<at::Tensor (*)(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias)>(at::native::fbgemm_linear_int8_weight_fp32_activation)); |
23254 | m.impl("fbgemm_linear_int8_weight" , static_cast<at::Tensor (*)(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias)>(at::native::fbgemm_linear_int8_weight)); |
23255 | m.impl("fbgemm_linear_quantize_weight" , static_cast<::std::tuple<at::Tensor,at::Tensor,double,int64_t> (*)(const at::Tensor & input)>(at::native::fbgemm_linear_quantize_weight)); |
23256 | m.impl("fbgemm_linear_fp16_weight" , static_cast<at::Tensor (*)(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias)>(at::native::fbgemm_linear_fp16_weight)); |
23257 | m.impl("log_softmax.Dimname" , static_cast<at::Tensor (*)(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype)>(at::native::log_softmax)); |
23258 | m.impl("max_pool1d_with_indices" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode)>(at::native::max_pool1d_with_indices)); |
23259 | m.impl("max_pool3d" , static_cast<at::Tensor (*)(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode)>(at::native::max_pool3d)); |
23260 | m.impl("multiply.Scalar" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Scalar & other)>(at::native::multiply)); |
23261 | m.impl("multiply_.Scalar" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Scalar & other)>(at::native::multiply_)); |
23262 | m.impl("is_vulkan_available" , static_cast<bool (*)()>(at::native::is_vulkan_available)); |
23263 | m.impl("cdist" , static_cast<at::Tensor (*)(const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode)>(at::native::cdist)); |
23264 | m.impl("poisson_nll_loss" , static_cast<at::Tensor (*)(const at::Tensor & input, const at::Tensor & target, bool log_input, bool full, double eps, int64_t reduction)>(at::native::poisson_nll_loss)); |
23265 | m.impl("repeat_interleave.self_Tensor" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & repeats, c10::optional<int64_t> dim, c10::optional<int64_t> output_size)>(at::native::repeat_interleave)); |
23266 | m.impl("repeat_interleave.self_int" , static_cast<at::Tensor (*)(const at::Tensor & self, c10::SymInt repeats, c10::optional<int64_t> dim, c10::optional<int64_t> output_size)>(at::native::repeat_interleave_symint)); |
23267 | m.impl("infinitely_differentiable_gelu_backward" , static_cast<at::Tensor (*)(const at::Tensor & grad, const at::Tensor & self)>(at::native::infinitely_differentiable_gelu_backward)); |
23268 | m.impl("mish_backward" , static_cast<at::Tensor (*)(const at::Tensor & grad_output, const at::Tensor & self)>(at::native::math_mish_backward)); |
23269 | m.impl("subtract.Scalar" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha)>(at::native::subtract)); |
23270 | m.impl("subtract_.Scalar" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha)>(at::native::subtract_)); |
23271 | m.impl("sparse_compressed_tensor.comp_plain_value_size" , static_cast<at::Tensor (*)(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory)>(at::native::sparse_compressed_tensor)); |
23272 | m.impl("sparse_bsr_tensor.crow_col_value_size" , static_cast<at::Tensor (*)(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory)>(at::native::sparse_bsr_tensor)); |
23273 | m.impl("sparse_compressed_tensor.comp_plain_value" , static_cast<at::Tensor (*)(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory)>(at::native::sparse_compressed_tensor)); |
23274 | m.impl("sparse_bsr_tensor.crow_col_value" , static_cast<at::Tensor (*)(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory)>(at::native::sparse_bsr_tensor)); |
23275 | m.impl("_sparse_csc_tensor_unsafe" , static_cast<at::Tensor (*)(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory)>(at::native::_sparse_csc_tensor_unsafe)); |
23276 | m.impl("_sparse_coo_tensor_unsafe" , static_cast<at::Tensor (*)(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory)>(at::native::_sparse_coo_tensor_unsafe_symint)); |
23277 | m.impl("_validate_sparse_coo_tensor_args" , static_cast<void (*)(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size)>(at::native::_validate_sparse_coo_tensor_args)); |
23278 | m.impl("to_dense" , static_cast<at::Tensor (*)(const at::Tensor & self, c10::optional<at::ScalarType> dtype)>(at::native::to_dense)); |
23279 | m.impl("to_dense_backward" , static_cast<at::Tensor (*)(const at::Tensor & grad, const at::Tensor & input)>(at::native::to_dense_backward)); |
23280 | m.impl("fake_quantize_per_channel_affine" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max)>(at::native::fake_quantize_per_channel_affine)); |
23281 | m.impl("choose_qparams_optimized" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width)>(at::native::choose_qparams_optimized)); |
23282 | m.impl("combinations" , static_cast<at::Tensor (*)(const at::Tensor & self, int64_t r, bool with_replacement)>(at::native::combinations)); |
23283 | m.impl("result_type.Tensor" , static_cast<at::ScalarType (*)(const at::Tensor & tensor, const at::Tensor & other)>(at::native::result_type)); |
23284 | m.impl("result_type.Scalar" , static_cast<at::ScalarType (*)(const at::Tensor & tensor, const at::Scalar & other)>(at::native::result_type)); |
23285 | m.impl("result_type.Scalar_Tensor" , static_cast<at::ScalarType (*)(const at::Scalar & scalar, const at::Tensor & tensor)>(at::native::result_type)); |
23286 | m.impl("result_type.Scalar_Scalar" , static_cast<at::ScalarType (*)(const at::Scalar & scalar1, const at::Scalar & scalar2)>(at::native::result_type)); |
23287 | m.impl("_thnn_differentiable_gru_cell_backward" , static_cast<::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> (*)(const at::Tensor & grad_hy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias)>(at::native::_thnn_differentiable_gru_cell_backward)); |
23288 | m.impl("lstm.input" , static_cast<::std::tuple<at::Tensor,at::Tensor,at::Tensor> (*)(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first)>(at::native::lstm)); |
23289 | m.impl("lstm.data" , static_cast<::std::tuple<at::Tensor,at::Tensor,at::Tensor> (*)(const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional)>(at::native::lstm)); |
23290 | m.impl("gru.input" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first)>(at::native::gru)); |
23291 | m.impl("gru.data" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional)>(at::native::gru)); |
23292 | m.impl("lstm_cell" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh)>(at::native::lstm_cell)); |
23293 | m.impl("quantized_gru_cell" , static_cast<at::Tensor (*)(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh)>(at::native::quantized_gru_cell)); |
23294 | m.impl("quantized_rnn_tanh_cell" , static_cast<at::Tensor (*)(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh)>(at::native::quantized_rnn_tanh_cell)); |
23295 | m.impl("index_add.dimname" , static_cast<at::Tensor (*)(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha)>(at::native::index_add)); |
23296 | m.impl("index_fill_.Dimname_Scalar" , static_cast<at::Tensor & (*)(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value)>(at::native::index_fill_)); |
23297 | m.impl("index_fill_.Dimname_Tensor" , static_cast<at::Tensor & (*)(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value)>(at::native::index_fill_)); |
23298 | m.impl("index_fill.Dimname_Scalar" , static_cast<at::Tensor (*)(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value)>(at::native::index_fill)); |
23299 | m.impl("index_fill.Dimname_Tensor" , static_cast<at::Tensor (*)(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value)>(at::native::index_fill)); |
23300 | m.impl("scatter.dimname_src" , static_cast<at::Tensor (*)(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src)>(at::native::scatter)); |
23301 | m.impl("scatter.dimname_value" , static_cast<at::Tensor (*)(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value)>(at::native::scatter)); |
23302 | m.impl("scatter_add.dimname" , static_cast<at::Tensor (*)(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src)>(at::native::scatter_add)); |
23303 | m.impl("__and__.Scalar" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Scalar & other)>(at::native::__and__)); |
23304 | m.impl("__and__.Tensor" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other)>(at::native::__and__)); |
23305 | m.impl("__iand__.Scalar" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Scalar & other)>(at::native::__iand__)); |
23306 | m.impl("__iand__.Tensor" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Tensor & other)>(at::native::__iand__)); |
23307 | m.impl("__or__.Scalar" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Scalar & other)>(at::native::__or__)); |
23308 | m.impl("__or__.Tensor" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other)>(at::native::__or__)); |
23309 | m.impl("__ior__.Scalar" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Scalar & other)>(at::native::__ior__)); |
23310 | m.impl("__ior__.Tensor" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Tensor & other)>(at::native::__ior__)); |
23311 | m.impl("index_select_backward" , static_cast<at::Tensor (*)(const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index)>(at::native::index_select_backward_symint)); |
23312 | m.impl("nonzero_numpy" , static_cast<::std::vector<at::Tensor> (*)(const at::Tensor & self)>(at::native::nonzero_numpy)); |
23313 | m.impl("_lu_with_info" , static_cast<::std::tuple<at::Tensor,at::Tensor,at::Tensor> (*)(const at::Tensor & self, bool pivot, bool check_errors)>(at::native::_lu_with_info)); |
23314 | m.impl("nll_loss_nd" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index)>(at::native::nll_loss_nd_symint)); |
23315 | m.impl("upsample_linear1d.vec" , static_cast<at::Tensor (*)(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors)>(at::native::upsample_linear1d)); |
23316 | m.impl("upsample_bicubic2d.vec" , static_cast<at::Tensor (*)(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors)>(at::native::upsample_bicubic2d)); |
23317 | m.impl("upsample_nearest1d.vec" , static_cast<at::Tensor (*)(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors)>(at::native::upsample_nearest1d)); |
23318 | m.impl("_upsample_nearest_exact1d.vec" , static_cast<at::Tensor (*)(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors)>(at::native::_upsample_nearest_exact1d)); |
23319 | m.impl("upsample_nearest3d.vec" , static_cast<at::Tensor (*)(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors)>(at::native::upsample_nearest3d)); |
23320 | m.impl("det" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::det)); |
23321 | m.impl("logdet" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::logdet)); |
23322 | m.impl("_test_string_default" , static_cast<at::Tensor (*)(const at::Tensor & dummy, c10::string_view a, c10::string_view b)>(at::native::_test_string_default));; |
23323 | } |
23324 | |
23325 | } // namespace |
23326 | |
23327 | } // namespace at |
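
// Example (commentary, not generated code): a minimal sketch of what these
// registrations buy you, assuming the helpers declared in
// ATen/FunctionalTensorWrapper.h. With the Functionalize key active, an
// in-place op on a wrapped tensor is executed as its functional form:
//
//   at::Tensor base = at::zeros({2, 2});
//   at::Tensor t = at::functionalization::impl::to_functional_tensor(base);
//   // t.add_(1) dispatches to a kernel registered in this file, which
//   // computes add(t, 1) and updates the wrapper's value; `base` is only
//   // touched when the pending updates are explicitly propagated back.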
23328 | |