#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
// @generated by torchgen/gen.py from RegisterFunctionalization.cpp

#include <ATen/core/LegacyTypeDispatch.h>
#include <ATen/EmptyTensor.h>
#include <ATen/FunctionalTensorWrapper.h>
#include <ATen/FunctionalInverses.h>
#include <torch/library.h>

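// When AT_PER_OPERATOR_HEADERS is defined, only the per-operator headers this
// file actually uses are included below, instead of the monolithic
// ATen/Operators.h and ATen/NativeFunctions.h, keeping compile-time
// dependencies (and incremental rebuilds) small.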
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Operators.h>
#include <ATen/NativeFunctions.h>
#else
// needed for the meta tensor calls to get stride info in functionalization
#include <ATen/ops/empty_strided_native.h>
// needed for special handling of copy_().
// See Note [functionalizating copy_() and not preserving strides]
#include <ATen/ops/to_ops.h>
#include <ATen/ops/expand_copy_ops.h>

#include <ATen/ops/_cudnn_ctc_loss_native.h>
#include <ATen/ops/_cudnn_ctc_loss_ops.h>
#include <ATen/ops/_cudnn_ctc_loss_native.h>
#include <ATen/ops/_cudnn_ctc_loss_ops.h>
#include <ATen/ops/_cudnn_rnn_native.h>
#include <ATen/ops/_cudnn_rnn_ops.h>
#include <ATen/ops/_cudnn_rnn_native.h>
#include <ATen/ops/_cudnn_rnn_ops.h>
#include <ATen/ops/_cudnn_rnn_backward_native.h>
#include <ATen/ops/_cudnn_rnn_backward_ops.h>
#include <ATen/ops/_cudnn_rnn_backward_native.h>
#include <ATen/ops/_cudnn_rnn_backward_ops.h>
#include <ATen/ops/_fused_dropout_native.h>
#include <ATen/ops/_fused_dropout_ops.h>
#include <ATen/ops/_fused_dropout_native.h>
#include <ATen/ops/_fused_dropout_ops.h>
#include <ATen/ops/conj_physical_native.h>
#include <ATen/ops/conj_physical_ops.h>
#include <ATen/ops/conj_physical_native.h>
#include <ATen/ops/conj_physical_ops.h>
#include <ATen/ops/conj_physical_native.h>
#include <ATen/ops/conj_physical_ops.h>
#include <ATen/ops/add_native.h>
#include <ATen/ops/add_ops.h>
#include <ATen/ops/add_native.h>
#include <ATen/ops/add_ops.h>
#include <ATen/ops/add_native.h>
#include <ATen/ops/add_ops.h>
#include <ATen/ops/add_native.h>
#include <ATen/ops/add_ops.h>
#include <ATen/ops/add_native.h>
#include <ATen/ops/add_ops.h>
#include <ATen/ops/add_native.h>
#include <ATen/ops/add_ops.h>
#include <ATen/ops/addmv_native.h>
#include <ATen/ops/addmv_ops.h>
#include <ATen/ops/addmv_native.h>
#include <ATen/ops/addmv_ops.h>
#include <ATen/ops/addmv_native.h>
#include <ATen/ops/addmv_ops.h>
#include <ATen/ops/addr_native.h>
#include <ATen/ops/addr_ops.h>
#include <ATen/ops/addr_native.h>
#include <ATen/ops/addr_ops.h>
#include <ATen/ops/addr_native.h>
#include <ATen/ops/addr_ops.h>
#include <ATen/ops/all_native.h>
#include <ATen/ops/all_ops.h>
#include <ATen/ops/all_native.h>
#include <ATen/ops/all_ops.h>
#include <ATen/ops/all_native.h>
#include <ATen/ops/all_ops.h>
#include <ATen/ops/all_native.h>
#include <ATen/ops/all_ops.h>
#include <ATen/ops/argmax_native.h>
#include <ATen/ops/argmax_ops.h>
#include <ATen/ops/argmax_native.h>
#include <ATen/ops/argmax_ops.h>
#include <ATen/ops/atan_native.h>
#include <ATen/ops/atan_ops.h>
#include <ATen/ops/atan_native.h>
#include <ATen/ops/atan_ops.h>
#include <ATen/ops/atan_native.h>
#include <ATen/ops/atan_ops.h>
#include <ATen/ops/bartlett_window_native.h>
#include <ATen/ops/bartlett_window_ops.h>
#include <ATen/ops/bartlett_window_native.h>
#include <ATen/ops/bartlett_window_ops.h>
#include <ATen/ops/bartlett_window_native.h>
#include <ATen/ops/bartlett_window_ops.h>
#include <ATen/ops/bartlett_window_native.h>
#include <ATen/ops/bartlett_window_ops.h>
#include <ATen/ops/binary_cross_entropy_native.h>
#include <ATen/ops/binary_cross_entropy_ops.h>
#include <ATen/ops/binary_cross_entropy_native.h>
#include <ATen/ops/binary_cross_entropy_ops.h>
#include <ATen/ops/bitwise_not_native.h>
#include <ATen/ops/bitwise_not_ops.h>
#include <ATen/ops/bitwise_not_native.h>
#include <ATen/ops/bitwise_not_ops.h>
#include <ATen/ops/bitwise_not_native.h>
#include <ATen/ops/bitwise_not_ops.h>
#include <ATen/ops/logical_xor_native.h>
#include <ATen/ops/logical_xor_ops.h>
#include <ATen/ops/logical_xor_native.h>
#include <ATen/ops/logical_xor_ops.h>
#include <ATen/ops/logical_xor_native.h>
#include <ATen/ops/logical_xor_ops.h>
#include <ATen/ops/blackman_window_native.h>
#include <ATen/ops/blackman_window_ops.h>
#include <ATen/ops/blackman_window_native.h>
#include <ATen/ops/blackman_window_ops.h>
#include <ATen/ops/blackman_window_native.h>
#include <ATen/ops/blackman_window_ops.h>
#include <ATen/ops/blackman_window_native.h>
#include <ATen/ops/blackman_window_ops.h>
#include <ATen/ops/clamp_native.h>
#include <ATen/ops/clamp_ops.h>
#include <ATen/ops/clamp_native.h>
#include <ATen/ops/clamp_ops.h>
#include <ATen/ops/clamp_native.h>
#include <ATen/ops/clamp_ops.h>
#include <ATen/ops/clamp_native.h>
#include <ATen/ops/clamp_ops.h>
#include <ATen/ops/clamp_native.h>
#include <ATen/ops/clamp_ops.h>
#include <ATen/ops/clamp_native.h>
#include <ATen/ops/clamp_ops.h>
#include <ATen/ops/_convolution_native.h>
#include <ATen/ops/_convolution_ops.h>
#include <ATen/ops/_convolution_native.h>
#include <ATen/ops/_convolution_ops.h>
#include <ATen/ops/copy_native.h>
#include <ATen/ops/copy_ops.h>
#include <ATen/ops/copy_native.h>
#include <ATen/ops/copy_ops.h>
#include <ATen/ops/copy_native.h>
#include <ATen/ops/copy_ops.h>
#include <ATen/ops/cudnn_convolution_native.h>
#include <ATen/ops/cudnn_convolution_ops.h>
#include <ATen/ops/cudnn_convolution_native.h>
#include <ATen/ops/cudnn_convolution_ops.h>
#include <ATen/ops/divide_native.h>
#include <ATen/ops/divide_ops.h>
#include <ATen/ops/divide_native.h>
#include <ATen/ops/divide_ops.h>
#include <ATen/ops/divide_native.h>
#include <ATen/ops/divide_ops.h>
#include <ATen/ops/divide_native.h>
#include <ATen/ops/divide_ops.h>
#include <ATen/ops/divide_native.h>
#include <ATen/ops/divide_ops.h>
#include <ATen/ops/divide_native.h>
#include <ATen/ops/divide_ops.h>
#include <ATen/ops/true_divide_native.h>
#include <ATen/ops/true_divide_ops.h>
#include <ATen/ops/true_divide_native.h>
#include <ATen/ops/true_divide_ops.h>
#include <ATen/ops/true_divide_native.h>
#include <ATen/ops/true_divide_ops.h>
#include <ATen/ops/dot_native.h>
#include <ATen/ops/dot_ops.h>
#include <ATen/ops/dot_native.h>
#include <ATen/ops/dot_ops.h>
#include <ATen/ops/vdot_native.h>
#include <ATen/ops/vdot_ops.h>
#include <ATen/ops/vdot_native.h>
#include <ATen/ops/vdot_ops.h>
#include <ATen/ops/row_stack_native.h>
#include <ATen/ops/row_stack_ops.h>
#include <ATen/ops/row_stack_native.h>
#include <ATen/ops/row_stack_ops.h>
#include <ATen/ops/new_empty_native.h>
#include <ATen/ops/new_empty_ops.h>
#include <ATen/ops/new_empty_native.h>
#include <ATen/ops/new_empty_ops.h>
#include <ATen/ops/new_empty_strided_native.h>
#include <ATen/ops/new_empty_strided_ops.h>
#include <ATen/ops/new_empty_strided_native.h>
#include <ATen/ops/new_empty_strided_ops.h>
#include <ATen/ops/_empty_affine_quantized_native.h>
#include <ATen/ops/_empty_affine_quantized_ops.h>
#include <ATen/ops/_empty_affine_quantized_native.h>
#include <ATen/ops/_empty_affine_quantized_ops.h>
#include <ATen/ops/_resize_output_native.h>
#include <ATen/ops/_resize_output_ops.h>
#include <ATen/ops/_resize_output_native.h>
#include <ATen/ops/_resize_output_ops.h>
#include <ATen/ops/_resize_output_native.h>
#include <ATen/ops/_resize_output_ops.h>
#include <ATen/ops/frac_native.h>
#include <ATen/ops/frac_ops.h>
#include <ATen/ops/frac_native.h>
#include <ATen/ops/frac_ops.h>
#include <ATen/ops/frac_native.h>
#include <ATen/ops/frac_ops.h>
#include <ATen/ops/full_like_native.h>
#include <ATen/ops/full_like_ops.h>
#include <ATen/ops/full_like_native.h>
#include <ATen/ops/full_like_ops.h>
#include <ATen/ops/lcm_native.h>
#include <ATen/ops/lcm_ops.h>
#include <ATen/ops/lcm_native.h>
#include <ATen/ops/lcm_ops.h>
#include <ATen/ops/lcm_native.h>
#include <ATen/ops/lcm_ops.h>
#include <ATen/ops/hann_window_native.h>
#include <ATen/ops/hann_window_ops.h>
#include <ATen/ops/hann_window_native.h>
#include <ATen/ops/hann_window_ops.h>
#include <ATen/ops/hann_window_native.h>
#include <ATen/ops/hann_window_ops.h>
#include <ATen/ops/hann_window_native.h>
#include <ATen/ops/hann_window_ops.h>
#include <ATen/ops/kaiser_window_native.h>
#include <ATen/ops/kaiser_window_ops.h>
#include <ATen/ops/kaiser_window_native.h>
#include <ATen/ops/kaiser_window_ops.h>
#include <ATen/ops/kaiser_window_native.h>
#include <ATen/ops/kaiser_window_ops.h>
#include <ATen/ops/kaiser_window_native.h>
#include <ATen/ops/kaiser_window_ops.h>
#include <ATen/ops/kaiser_window_native.h>
#include <ATen/ops/kaiser_window_ops.h>
#include <ATen/ops/kaiser_window_native.h>
#include <ATen/ops/kaiser_window_ops.h>
#include <ATen/ops/_index_put_impl_native.h>
#include <ATen/ops/_index_put_impl_ops.h>
#include <ATen/ops/_index_put_impl_native.h>
#include <ATen/ops/_index_put_impl_ops.h>
#include <ATen/ops/_index_put_impl_native.h>
#include <ATen/ops/_index_put_impl_ops.h>
#include <ATen/ops/kthvalue_native.h>
#include <ATen/ops/kthvalue_ops.h>
#include <ATen/ops/kthvalue_native.h>
#include <ATen/ops/kthvalue_ops.h>
#include <ATen/ops/kthvalue_native.h>
#include <ATen/ops/kthvalue_ops.h>
#include <ATen/ops/kthvalue_native.h>
#include <ATen/ops/kthvalue_ops.h>
#include <ATen/ops/native_layer_norm_native.h>
#include <ATen/ops/native_layer_norm_ops.h>
#include <ATen/ops/native_layer_norm_native.h>
#include <ATen/ops/native_layer_norm_ops.h>
#include <ATen/ops/native_layer_norm_backward_native.h>
#include <ATen/ops/native_layer_norm_backward_ops.h>
#include <ATen/ops/native_layer_norm_backward_native.h>
#include <ATen/ops/native_layer_norm_backward_ops.h>
#include <ATen/ops/mkldnn_linear_backward_input_native.h>
#include <ATen/ops/mkldnn_linear_backward_input_ops.h>
#include <ATen/ops/mkldnn_linear_backward_input_native.h>
#include <ATen/ops/mkldnn_linear_backward_input_ops.h>
#include <ATen/ops/mkldnn_linear_backward_native.h>
#include <ATen/ops/mkldnn_linear_backward_ops.h>
#include <ATen/ops/mkldnn_linear_backward_native.h>
#include <ATen/ops/mkldnn_linear_backward_ops.h>
#include <ATen/ops/log10_native.h>
#include <ATen/ops/log10_ops.h>
#include <ATen/ops/log10_native.h>
#include <ATen/ops/log10_ops.h>
#include <ATen/ops/log10_native.h>
#include <ATen/ops/log10_ops.h>
#include <ATen/ops/log1p_native.h>
#include <ATen/ops/log1p_ops.h>
#include <ATen/ops/log1p_native.h>
#include <ATen/ops/log1p_ops.h>
#include <ATen/ops/log1p_native.h>
#include <ATen/ops/log1p_ops.h>
#include <ATen/ops/logsumexp_native.h>
#include <ATen/ops/logsumexp_ops.h>
#include <ATen/ops/logsumexp_native.h>
#include <ATen/ops/logsumexp_ops.h>
#include <ATen/ops/logsumexp_native.h>
#include <ATen/ops/logsumexp_ops.h>
#include <ATen/ops/logsumexp_native.h>
#include <ATen/ops/logsumexp_ops.h>
#include <ATen/ops/matmul_native.h>
#include <ATen/ops/matmul_ops.h>
#include <ATen/ops/matmul_native.h>
#include <ATen/ops/matmul_ops.h>
#include <ATen/ops/mkldnn_max_pool2d_native.h>
#include <ATen/ops/mkldnn_max_pool2d_ops.h>
#include <ATen/ops/mkldnn_max_pool2d_native.h>
#include <ATen/ops/mkldnn_max_pool2d_ops.h>
#include <ATen/ops/quantized_max_pool1d_native.h>
#include <ATen/ops/quantized_max_pool1d_ops.h>
#include <ATen/ops/quantized_max_pool1d_native.h>
#include <ATen/ops/quantized_max_pool1d_ops.h>
#include <ATen/ops/nanmean_native.h>
#include <ATen/ops/nanmean_ops.h>
#include <ATen/ops/nanmean_native.h>
#include <ATen/ops/nanmean_ops.h>
#include <ATen/ops/_mps_convolution_native.h>
#include <ATen/ops/_mps_convolution_ops.h>
#include <ATen/ops/_mps_convolution_native.h>
#include <ATen/ops/_mps_convolution_ops.h>
#include <ATen/ops/mkldnn_convolution_native.h>
#include <ATen/ops/mkldnn_convolution_ops.h>
#include <ATen/ops/mkldnn_convolution_native.h>
#include <ATen/ops/mkldnn_convolution_ops.h>
#include <ATen/ops/mkldnn_rnn_layer_native.h>
#include <ATen/ops/mkldnn_rnn_layer_ops.h>
#include <ATen/ops/mkldnn_rnn_layer_native.h>
#include <ATen/ops/mkldnn_rnn_layer_ops.h>
#include <ATen/ops/miopen_batch_norm_native.h>
#include <ATen/ops/miopen_batch_norm_ops.h>
#include <ATen/ops/miopen_batch_norm_native.h>
#include <ATen/ops/miopen_batch_norm_ops.h>
#include <ATen/ops/miopen_batch_norm_backward_native.h>
#include <ATen/ops/miopen_batch_norm_backward_ops.h>
#include <ATen/ops/miopen_batch_norm_backward_native.h>
#include <ATen/ops/miopen_batch_norm_backward_ops.h>
#include <ATen/ops/miopen_convolution_transpose_native.h>
#include <ATen/ops/miopen_convolution_transpose_ops.h>
#include <ATen/ops/miopen_convolution_transpose_native.h>
#include <ATen/ops/miopen_convolution_transpose_ops.h>
#include <ATen/ops/mm_native.h>
#include <ATen/ops/mm_ops.h>
#include <ATen/ops/mm_native.h>
#include <ATen/ops/mm_ops.h>
#include <ATen/ops/_sparse_sparse_matmul_native.h>
#include <ATen/ops/_sparse_sparse_matmul_ops.h>
#include <ATen/ops/_sparse_sparse_matmul_native.h>
#include <ATen/ops/_sparse_sparse_matmul_ops.h>
#include <ATen/ops/mul_native.h>
#include <ATen/ops/mul_ops.h>
#include <ATen/ops/mul_native.h>
#include <ATen/ops/mul_ops.h>
#include <ATen/ops/mul_native.h>
#include <ATen/ops/mul_ops.h>
#include <ATen/ops/mul_native.h>
#include <ATen/ops/mul_ops.h>
#include <ATen/ops/mul_native.h>
#include <ATen/ops/mul_ops.h>
#include <ATen/ops/mul_native.h>
#include <ATen/ops/mul_ops.h>
#include <ATen/ops/mvlgamma_native.h>
#include <ATen/ops/mvlgamma_ops.h>
#include <ATen/ops/mvlgamma_native.h>
#include <ATen/ops/mvlgamma_ops.h>
#include <ATen/ops/mvlgamma_native.h>
#include <ATen/ops/mvlgamma_ops.h>
#include <ATen/ops/batch_norm_backward_reduce_native.h>
#include <ATen/ops/batch_norm_backward_reduce_ops.h>
#include <ATen/ops/batch_norm_backward_reduce_native.h>
#include <ATen/ops/batch_norm_backward_reduce_ops.h>
#include <ATen/ops/deg2rad_native.h>
#include <ATen/ops/deg2rad_ops.h>
#include <ATen/ops/deg2rad_native.h>
#include <ATen/ops/deg2rad_ops.h>
#include <ATen/ops/deg2rad_native.h>
#include <ATen/ops/deg2rad_ops.h>
#include <ATen/ops/randint_like_native.h>
#include <ATen/ops/randint_like_ops.h>
#include <ATen/ops/randint_like_native.h>
#include <ATen/ops/randint_like_ops.h>
#include <ATen/ops/randint_like_native.h>
#include <ATen/ops/randint_like_ops.h>
#include <ATen/ops/randint_like_native.h>
#include <ATen/ops/randint_like_ops.h>
#include <ATen/ops/repeat_native.h>
#include <ATen/ops/repeat_ops.h>
#include <ATen/ops/repeat_native.h>
#include <ATen/ops/repeat_ops.h>
#include <ATen/ops/_mkldnn_reshape_native.h>
#include <ATen/ops/_mkldnn_reshape_ops.h>
#include <ATen/ops/_mkldnn_reshape_native.h>
#include <ATen/ops/_mkldnn_reshape_ops.h>
#include <ATen/ops/round_native.h>
#include <ATen/ops/round_ops.h>
#include <ATen/ops/round_native.h>
#include <ATen/ops/round_ops.h>
#include <ATen/ops/round_native.h>
#include <ATen/ops/round_ops.h>
#include <ATen/ops/round_native.h>
#include <ATen/ops/round_ops.h>
#include <ATen/ops/round_native.h>
#include <ATen/ops/round_ops.h>
#include <ATen/ops/round_native.h>
#include <ATen/ops/round_ops.h>
#include <ATen/ops/rsqrt_native.h>
#include <ATen/ops/rsqrt_ops.h>
#include <ATen/ops/rsqrt_native.h>
#include <ATen/ops/rsqrt_ops.h>
#include <ATen/ops/rsqrt_native.h>
#include <ATen/ops/rsqrt_ops.h>
#include <ATen/ops/celu_native.h>
#include <ATen/ops/celu_ops.h>
#include <ATen/ops/celu_native.h>
#include <ATen/ops/celu_ops.h>
#include <ATen/ops/celu_native.h>
#include <ATen/ops/celu_ops.h>
#include <ATen/ops/sigmoid_native.h>
#include <ATen/ops/sigmoid_ops.h>
#include <ATen/ops/sigmoid_native.h>
#include <ATen/ops/sigmoid_ops.h>
#include <ATen/ops/sigmoid_native.h>
#include <ATen/ops/sigmoid_ops.h>
#include <ATen/ops/sinc_native.h>
#include <ATen/ops/sinc_ops.h>
#include <ATen/ops/sinc_native.h>
#include <ATen/ops/sinc_ops.h>
#include <ATen/ops/sinc_native.h>
#include <ATen/ops/sinc_ops.h>
#include <ATen/ops/sinh_native.h>
#include <ATen/ops/sinh_ops.h>
#include <ATen/ops/sinh_native.h>
#include <ATen/ops/sinh_ops.h>
#include <ATen/ops/sinh_native.h>
#include <ATen/ops/sinh_ops.h>
#include <ATen/ops/slice_backward_native.h>
#include <ATen/ops/slice_backward_ops.h>
#include <ATen/ops/slice_backward_native.h>
#include <ATen/ops/slice_backward_ops.h>
#include <ATen/ops/as_strided_scatter_native.h>
#include <ATen/ops/as_strided_scatter_ops.h>
#include <ATen/ops/as_strided_scatter_native.h>
#include <ATen/ops/as_strided_scatter_ops.h>
#include <ATen/ops/std_mean_native.h>
#include <ATen/ops/std_mean_ops.h>
#include <ATen/ops/std_mean_native.h>
#include <ATen/ops/std_mean_ops.h>
#include <ATen/ops/_mkldnn_transpose_native.h>
#include <ATen/ops/_mkldnn_transpose_ops.h>
#include <ATen/ops/_mkldnn_transpose_native.h>
#include <ATen/ops/_mkldnn_transpose_ops.h>
#include <ATen/ops/_mkldnn_transpose_native.h>
#include <ATen/ops/_mkldnn_transpose_ops.h>
#include <ATen/ops/flip_native.h>
#include <ATen/ops/flip_ops.h>
#include <ATen/ops/flip_native.h>
#include <ATen/ops/flip_ops.h>
#include <ATen/ops/_nested_tensor_from_mask_native.h>
#include <ATen/ops/_nested_tensor_from_mask_ops.h>
#include <ATen/ops/_nested_tensor_from_mask_native.h>
#include <ATen/ops/_nested_tensor_from_mask_ops.h>
#include <ATen/ops/_nested_from_padded_and_nested_example_native.h>
#include <ATen/ops/_nested_from_padded_and_nested_example_ops.h>
#include <ATen/ops/_nested_from_padded_and_nested_example_native.h>
#include <ATen/ops/_nested_from_padded_and_nested_example_ops.h>
#include <ATen/ops/unique_dim_native.h>
#include <ATen/ops/unique_dim_ops.h>
#include <ATen/ops/unique_dim_native.h>
#include <ATen/ops/unique_dim_ops.h>
#include <ATen/ops/_unsafe_view_native.h>
#include <ATen/ops/_unsafe_view_ops.h>
#include <ATen/ops/_unsafe_view_native.h>
#include <ATen/ops/_unsafe_view_ops.h>
#include <ATen/ops/var_mean_native.h>
#include <ATen/ops/var_mean_ops.h>
#include <ATen/ops/var_mean_native.h>
#include <ATen/ops/var_mean_ops.h>
#include <ATen/ops/zeros_native.h>
#include <ATen/ops/zeros_ops.h>
#include <ATen/ops/zeros_native.h>
#include <ATen/ops/zeros_ops.h>
#include <ATen/ops/zeros_native.h>
#include <ATen/ops/zeros_ops.h>
#include <ATen/ops/zeros_native.h>
#include <ATen/ops/zeros_ops.h>
#include <ATen/ops/zeros_like_native.h>
#include <ATen/ops/zeros_like_ops.h>
#include <ATen/ops/zeros_like_native.h>
#include <ATen/ops/zeros_like_ops.h>
#include <ATen/ops/_sparse_csr_prod_native.h>
#include <ATen/ops/_sparse_csr_prod_ops.h>
#include <ATen/ops/_sparse_csr_prod_native.h>
#include <ATen/ops/_sparse_csr_prod_ops.h>
#include <ATen/ops/_spdiags_native.h>
#include <ATen/ops/_spdiags_ops.h>
#include <ATen/ops/_spdiags_native.h>
#include <ATen/ops/_spdiags_ops.h>
#include <ATen/ops/rsub_native.h>
#include <ATen/ops/rsub_ops.h>
#include <ATen/ops/rsub_native.h>
#include <ATen/ops/rsub_ops.h>
#include <ATen/ops/rsub_native.h>
#include <ATen/ops/rsub_ops.h>
#include <ATen/ops/rsub_native.h>
#include <ATen/ops/rsub_ops.h>
#include <ATen/ops/_sparse_addmm_native.h>
#include <ATen/ops/_sparse_addmm_ops.h>
#include <ATen/ops/_sparse_addmm_native.h>
#include <ATen/ops/_sparse_addmm_ops.h>
#include <ATen/ops/sparse_coo_tensor_native.h>
#include <ATen/ops/sparse_coo_tensor_ops.h>
#include <ATen/ops/sparse_coo_tensor_native.h>
#include <ATen/ops/sparse_coo_tensor_ops.h>
#include <ATen/ops/sparse_resize_native.h>
#include <ATen/ops/sparse_resize_ops.h>
#include <ATen/ops/sparse_resize_native.h>
#include <ATen/ops/sparse_resize_ops.h>
#include <ATen/ops/sparse_resize_native.h>
#include <ATen/ops/sparse_resize_ops.h>
#include <ATen/ops/sparse_mask_native.h>
#include <ATen/ops/sparse_mask_ops.h>
#include <ATen/ops/sparse_mask_native.h>
#include <ATen/ops/sparse_mask_ops.h>
#include <ATen/ops/_coalesce_native.h>
#include <ATen/ops/_coalesce_ops.h>
#include <ATen/ops/_coalesce_native.h>
#include <ATen/ops/_coalesce_ops.h>
#include <ATen/ops/dequantize_native.h>
#include <ATen/ops/dequantize_ops.h>
#include <ATen/ops/dequantize_native.h>
#include <ATen/ops/dequantize_ops.h>
#include <ATen/ops/dequantize_native.h>
#include <ATen/ops/dequantize_ops.h>
#include <ATen/ops/dequantize_native.h>
#include <ATen/ops/dequantize_ops.h>
#include <ATen/ops/q_per_channel_zero_points_native.h>
#include <ATen/ops/q_per_channel_zero_points_ops.h>
#include <ATen/ops/q_per_channel_zero_points_native.h>
#include <ATen/ops/q_per_channel_zero_points_ops.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_native.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_ops.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_native.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_ops.h>
#include <ATen/ops/_fused_moving_avg_obs_fq_helper_native.h>
#include <ATen/ops/_fused_moving_avg_obs_fq_helper_ops.h>
#include <ATen/ops/_fused_moving_avg_obs_fq_helper_native.h>
#include <ATen/ops/_fused_moving_avg_obs_fq_helper_ops.h>
#include <ATen/ops/_fused_moving_avg_obs_fq_helper_native.h>
#include <ATen/ops/_fused_moving_avg_obs_fq_helper_ops.h>
#include <ATen/ops/_to_copy_native.h>
#include <ATen/ops/_to_copy_ops.h>
#include <ATen/ops/_to_copy_native.h>
#include <ATen/ops/_to_copy_ops.h>
#include <ATen/ops/_thnn_fused_gru_cell_native.h>
#include <ATen/ops/_thnn_fused_gru_cell_ops.h>
#include <ATen/ops/_thnn_fused_gru_cell_native.h>
#include <ATen/ops/_thnn_fused_gru_cell_ops.h>
#include <ATen/ops/_pack_padded_sequence_native.h>
#include <ATen/ops/_pack_padded_sequence_ops.h>
#include <ATen/ops/_pack_padded_sequence_native.h>
#include <ATen/ops/_pack_padded_sequence_ops.h>
#include <ATen/ops/scatter_reduce_native.h>
#include <ATen/ops/scatter_reduce_ops.h>
#include <ATen/ops/scatter_reduce_native.h>
#include <ATen/ops/scatter_reduce_ops.h>
#include <ATen/ops/scatter_reduce_native.h>
#include <ATen/ops/scatter_reduce_ops.h>
#include <ATen/ops/bitwise_xor_native.h>
#include <ATen/ops/bitwise_xor_ops.h>
#include <ATen/ops/bitwise_xor_native.h>
#include <ATen/ops/bitwise_xor_ops.h>
#include <ATen/ops/bitwise_xor_native.h>
#include <ATen/ops/bitwise_xor_ops.h>
#include <ATen/ops/bitwise_xor_native.h>
#include <ATen/ops/bitwise_xor_ops.h>
#include <ATen/ops/bitwise_xor_native.h>
#include <ATen/ops/bitwise_xor_ops.h>
#include <ATen/ops/bitwise_xor_native.h>
#include <ATen/ops/bitwise_xor_ops.h>
#include <ATen/ops/bitwise_xor_native.h>
#include <ATen/ops/bitwise_xor_ops.h>
#include <ATen/ops/bitwise_xor_native.h>
#include <ATen/ops/bitwise_xor_ops.h>
#include <ATen/ops/addbmm_native.h>
#include <ATen/ops/addbmm_ops.h>
#include <ATen/ops/addbmm_native.h>
#include <ATen/ops/addbmm_ops.h>
#include <ATen/ops/addbmm_native.h>
#include <ATen/ops/addbmm_ops.h>
#include <ATen/ops/random_native.h>
#include <ATen/ops/random_ops.h>
#include <ATen/ops/random_native.h>
#include <ATen/ops/random_ops.h>
#include <ATen/ops/random_native.h>
#include <ATen/ops/random_ops.h>
#include <ATen/ops/random_native.h>
#include <ATen/ops/random_ops.h>
#include <ATen/ops/random_native.h>
#include <ATen/ops/random_ops.h>
#include <ATen/ops/random_native.h>
#include <ATen/ops/random_ops.h>
#include <ATen/ops/random_native.h>
#include <ATen/ops/random_ops.h>
#include <ATen/ops/random_native.h>
#include <ATen/ops/random_ops.h>
#include <ATen/ops/random_native.h>
#include <ATen/ops/random_ops.h>
#include <ATen/ops/exponential_native.h>
#include <ATen/ops/exponential_ops.h>
#include <ATen/ops/exponential_native.h>
#include <ATen/ops/exponential_ops.h>
#include <ATen/ops/exponential_native.h>
#include <ATen/ops/exponential_ops.h>
#include <ATen/ops/geometric_native.h>
#include <ATen/ops/geometric_ops.h>
#include <ATen/ops/geometric_native.h>
#include <ATen/ops/geometric_ops.h>
#include <ATen/ops/geometric_native.h>
#include <ATen/ops/geometric_ops.h>
#include <ATen/ops/cross_native.h>
#include <ATen/ops/cross_ops.h>
#include <ATen/ops/cross_native.h>
#include <ATen/ops/cross_ops.h>
#include <ATen/ops/trace_native.h>
#include <ATen/ops/trace_ops.h>
#include <ATen/ops/trace_native.h>
#include <ATen/ops/trace_ops.h>
#include <ATen/ops/take_along_dim_native.h>
#include <ATen/ops/take_along_dim_ops.h>
#include <ATen/ops/take_along_dim_native.h>
#include <ATen/ops/take_along_dim_ops.h>
#include <ATen/ops/index_select_native.h>
#include <ATen/ops/index_select_ops.h>
#include <ATen/ops/index_select_native.h>
#include <ATen/ops/index_select_ops.h>
#include <ATen/ops/index_select_native.h>
#include <ATen/ops/index_select_ops.h>
#include <ATen/ops/index_select_native.h>
#include <ATen/ops/index_select_ops.h>
#include <ATen/ops/masked_select_native.h>
#include <ATen/ops/masked_select_ops.h>
#include <ATen/ops/masked_select_native.h>
#include <ATen/ops/masked_select_ops.h>
#include <ATen/ops/linalg_solve_triangular_native.h>
#include <ATen/ops/linalg_solve_triangular_ops.h>
#include <ATen/ops/linalg_solve_triangular_native.h>
#include <ATen/ops/linalg_solve_triangular_ops.h>
#include <ATen/ops/svd_native.h>
#include <ATen/ops/svd_ops.h>
#include <ATen/ops/svd_native.h>
#include <ATen/ops/svd_ops.h>
#include <ATen/ops/multinomial_native.h>
#include <ATen/ops/multinomial_ops.h>
#include <ATen/ops/multinomial_native.h>
#include <ATen/ops/multinomial_ops.h>
#include <ATen/ops/histogram_native.h>
#include <ATen/ops/histogram_ops.h>
#include <ATen/ops/histogram_native.h>
#include <ATen/ops/histogram_ops.h>
#include <ATen/ops/histogram_native.h>
#include <ATen/ops/histogram_ops.h>
#include <ATen/ops/histogram_native.h>
#include <ATen/ops/histogram_ops.h>
#include <ATen/ops/igammac_native.h>
#include <ATen/ops/igammac_ops.h>
#include <ATen/ops/igammac_native.h>
#include <ATen/ops/igammac_ops.h>
#include <ATen/ops/igammac_native.h>
#include <ATen/ops/igammac_ops.h>
#include <ATen/ops/remainder_native.h>
#include <ATen/ops/remainder_ops.h>
#include <ATen/ops/remainder_native.h>
#include <ATen/ops/remainder_ops.h>
#include <ATen/ops/remainder_native.h>
#include <ATen/ops/remainder_ops.h>
#include <ATen/ops/remainder_native.h>
#include <ATen/ops/remainder_ops.h>
#include <ATen/ops/remainder_native.h>
#include <ATen/ops/remainder_ops.h>
#include <ATen/ops/remainder_native.h>
#include <ATen/ops/remainder_ops.h>
#include <ATen/ops/remainder_native.h>
#include <ATen/ops/remainder_ops.h>
#include <ATen/ops/remainder_native.h>
#include <ATen/ops/remainder_ops.h>
#include <ATen/ops/quantile_native.h>
#include <ATen/ops/quantile_ops.h>
#include <ATen/ops/quantile_native.h>
#include <ATen/ops/quantile_ops.h>
#include <ATen/ops/quantile_native.h>
#include <ATen/ops/quantile_ops.h>
#include <ATen/ops/quantile_native.h>
#include <ATen/ops/quantile_ops.h>
#include <ATen/ops/nanquantile_native.h>
#include <ATen/ops/nanquantile_ops.h>
#include <ATen/ops/nanquantile_native.h>
#include <ATen/ops/nanquantile_ops.h>
#include <ATen/ops/nanquantile_native.h>
#include <ATen/ops/nanquantile_ops.h>
#include <ATen/ops/nanquantile_native.h>
#include <ATen/ops/nanquantile_ops.h>
#include <ATen/ops/sort_native.h>
#include <ATen/ops/sort_ops.h>
#include <ATen/ops/sort_native.h>
#include <ATen/ops/sort_ops.h>
#include <ATen/ops/sort_native.h>
#include <ATen/ops/sort_ops.h>
#include <ATen/ops/sort_native.h>
#include <ATen/ops/sort_ops.h>
#include <ATen/ops/sort_native.h>
#include <ATen/ops/sort_ops.h>
#include <ATen/ops/sort_native.h>
#include <ATen/ops/sort_ops.h>
#include <ATen/ops/sort_native.h>
#include <ATen/ops/sort_ops.h>
#include <ATen/ops/sort_native.h>
#include <ATen/ops/sort_ops.h>
#include <ATen/ops/argsort_native.h>
#include <ATen/ops/argsort_ops.h>
#include <ATen/ops/argsort_native.h>
#include <ATen/ops/argsort_ops.h>
#include <ATen/ops/all_native.h>
#include <ATen/ops/all_ops.h>
#include <ATen/ops/all_native.h>
#include <ATen/ops/all_ops.h>
#include <ATen/ops/renorm_native.h>
#include <ATen/ops/renorm_ops.h>
#include <ATen/ops/renorm_native.h>
#include <ATen/ops/renorm_ops.h>
#include <ATen/ops/renorm_native.h>
#include <ATen/ops/renorm_ops.h>
#include <ATen/ops/unfold_backward_native.h>
#include <ATen/ops/unfold_backward_ops.h>
#include <ATen/ops/unfold_backward_native.h>
#include <ATen/ops/unfold_backward_ops.h>
#include <ATen/ops/pow_native.h>
#include <ATen/ops/pow_ops.h>
#include <ATen/ops/pow_native.h>
#include <ATen/ops/pow_ops.h>
#include <ATen/ops/pow_native.h>
#include <ATen/ops/pow_ops.h>
#include <ATen/ops/pow_native.h>
#include <ATen/ops/pow_ops.h>
#include <ATen/ops/pow_native.h>
#include <ATen/ops/pow_ops.h>
#include <ATen/ops/pow_native.h>
#include <ATen/ops/pow_ops.h>
#include <ATen/ops/pow_native.h>
#include <ATen/ops/pow_ops.h>
#include <ATen/ops/pow_native.h>
#include <ATen/ops/pow_ops.h>
#include <ATen/ops/float_power_native.h>
#include <ATen/ops/float_power_ops.h>
#include <ATen/ops/float_power_native.h>
#include <ATen/ops/float_power_ops.h>
#include <ATen/ops/float_power_native.h>
#include <ATen/ops/float_power_ops.h>
#include <ATen/ops/float_power_native.h>
#include <ATen/ops/float_power_ops.h>
#include <ATen/ops/float_power_native.h>
#include <ATen/ops/float_power_ops.h>
#include <ATen/ops/float_power_native.h>
#include <ATen/ops/float_power_ops.h>
#include <ATen/ops/float_power_native.h>
#include <ATen/ops/float_power_ops.h>
#include <ATen/ops/float_power_native.h>
#include <ATen/ops/float_power_ops.h>
#include <ATen/ops/normal_native.h>
#include <ATen/ops/normal_ops.h>
#include <ATen/ops/normal_native.h>
#include <ATen/ops/normal_ops.h>
#include <ATen/ops/normal_native.h>
#include <ATen/ops/normal_ops.h>
#include <ATen/ops/normal_native.h>
#include <ATen/ops/normal_ops.h>
#include <ATen/ops/normal_native.h>
#include <ATen/ops/normal_ops.h>
#include <ATen/ops/normal_native.h>
#include <ATen/ops/normal_ops.h>
#include <ATen/ops/normal_native.h>
#include <ATen/ops/normal_ops.h>
#include <ATen/ops/normal_native.h>
#include <ATen/ops/normal_ops.h>
#include <ATen/ops/normal_native.h>
#include <ATen/ops/normal_ops.h>
#include <ATen/ops/normal_native.h>
#include <ATen/ops/normal_ops.h>
#include <ATen/ops/normal_native.h>
#include <ATen/ops/normal_ops.h>
#include <ATen/ops/_amp_update_scale_native.h>
#include <ATen/ops/_amp_update_scale_ops.h>
#include <ATen/ops/_amp_update_scale_native.h>
#include <ATen/ops/_amp_update_scale_ops.h>
#include <ATen/ops/_amp_update_scale_native.h>
#include <ATen/ops/_amp_update_scale_ops.h>
#include <ATen/ops/_foreach_atan_native.h>
#include <ATen/ops/_foreach_atan_ops.h>
#include <ATen/ops/_foreach_atan_native.h>
#include <ATen/ops/_foreach_atan_ops.h>
#include <ATen/ops/_foreach_atan_native.h>
#include <ATen/ops/_foreach_atan_ops.h>
#include <ATen/ops/_foreach_erf_native.h>
#include <ATen/ops/_foreach_erf_ops.h>
#include <ATen/ops/_foreach_erf_native.h>
#include <ATen/ops/_foreach_erf_ops.h>
#include <ATen/ops/_foreach_erf_native.h>
#include <ATen/ops/_foreach_erf_ops.h>
#include <ATen/ops/_foreach_erfc_native.h>
#include <ATen/ops/_foreach_erfc_ops.h>
#include <ATen/ops/_foreach_erfc_native.h>
#include <ATen/ops/_foreach_erfc_ops.h>
#include <ATen/ops/_foreach_erfc_native.h>
#include <ATen/ops/_foreach_erfc_ops.h>
#include <ATen/ops/_foreach_log_native.h>
#include <ATen/ops/_foreach_log_ops.h>
#include <ATen/ops/_foreach_log_native.h>
#include <ATen/ops/_foreach_log_ops.h>
#include <ATen/ops/_foreach_log_native.h>
#include <ATen/ops/_foreach_log_ops.h>
#include <ATen/ops/_foreach_sinh_native.h>
#include <ATen/ops/_foreach_sinh_ops.h>
#include <ATen/ops/_foreach_sinh_native.h>
#include <ATen/ops/_foreach_sinh_ops.h>
#include <ATen/ops/_foreach_sinh_native.h>
#include <ATen/ops/_foreach_sinh_ops.h>
#include <ATen/ops/_foreach_lgamma_native.h>
#include <ATen/ops/_foreach_lgamma_ops.h>
#include <ATen/ops/_foreach_lgamma_native.h>
#include <ATen/ops/_foreach_lgamma_ops.h>
#include <ATen/ops/_foreach_lgamma_native.h>
#include <ATen/ops/_foreach_lgamma_ops.h>
#include <ATen/ops/_foreach_lerp_native.h>
#include <ATen/ops/_foreach_lerp_ops.h>
#include <ATen/ops/_foreach_lerp_native.h>
#include <ATen/ops/_foreach_lerp_ops.h>
#include <ATen/ops/_foreach_lerp_native.h>
#include <ATen/ops/_foreach_lerp_ops.h>
#include <ATen/ops/_foreach_lerp_native.h>
#include <ATen/ops/_foreach_lerp_ops.h>
#include <ATen/ops/_foreach_lerp_native.h>
#include <ATen/ops/_foreach_lerp_ops.h>
#include <ATen/ops/_foreach_lerp_native.h>
#include <ATen/ops/_foreach_lerp_ops.h>
#include <ATen/ops/_convert_indices_from_coo_to_csr_native.h>
#include <ATen/ops/_convert_indices_from_coo_to_csr_ops.h>
#include <ATen/ops/_convert_indices_from_coo_to_csr_native.h>
#include <ATen/ops/_convert_indices_from_coo_to_csr_ops.h>
#include <ATen/ops/multi_margin_loss_backward_native.h>
#include <ATen/ops/multi_margin_loss_backward_ops.h>
#include <ATen/ops/multi_margin_loss_backward_native.h>
#include <ATen/ops/multi_margin_loss_backward_ops.h>
#include <ATen/ops/nll_loss_forward_native.h>
#include <ATen/ops/nll_loss_forward_ops.h>
#include <ATen/ops/nll_loss_forward_native.h>
#include <ATen/ops/nll_loss_forward_ops.h>
#include <ATen/ops/hardswish_native.h>
#include <ATen/ops/hardswish_ops.h>
#include <ATen/ops/hardswish_native.h>
#include <ATen/ops/hardswish_ops.h>
#include <ATen/ops/hardswish_native.h>
#include <ATen/ops/hardswish_ops.h>
#include <ATen/ops/hardswish_backward_native.h>
#include <ATen/ops/hardswish_backward_ops.h>
#include <ATen/ops/hardswish_backward_native.h>
#include <ATen/ops/hardswish_backward_ops.h>
#include <ATen/ops/softshrink_backward_native.h>
#include <ATen/ops/softshrink_backward_ops.h>
#include <ATen/ops/softshrink_backward_native.h>
#include <ATen/ops/softshrink_backward_ops.h>
#include <ATen/ops/mkldnn_adaptive_avg_pool2d_backward_native.h>
#include <ATen/ops/mkldnn_adaptive_avg_pool2d_backward_ops.h>
#include <ATen/ops/mkldnn_adaptive_avg_pool2d_backward_native.h>
#include <ATen/ops/mkldnn_adaptive_avg_pool2d_backward_ops.h>
#include <ATen/ops/_adaptive_avg_pool2d_backward_native.h>
#include <ATen/ops/_adaptive_avg_pool2d_backward_ops.h>
#include <ATen/ops/_adaptive_avg_pool2d_backward_native.h>
#include <ATen/ops/_adaptive_avg_pool2d_backward_ops.h>
#include <ATen/ops/adaptive_avg_pool3d_native.h>
#include <ATen/ops/adaptive_avg_pool3d_ops.h>
#include <ATen/ops/adaptive_avg_pool3d_native.h>
#include <ATen/ops/adaptive_avg_pool3d_ops.h>
#include <ATen/ops/adaptive_max_pool3d_backward_native.h>
#include <ATen/ops/adaptive_max_pool3d_backward_ops.h>
#include <ATen/ops/adaptive_max_pool3d_backward_native.h>
#include <ATen/ops/adaptive_max_pool3d_backward_ops.h>
#include <ATen/ops/avg_pool2d_native.h>
#include <ATen/ops/avg_pool2d_ops.h>
#include <ATen/ops/avg_pool2d_native.h>
#include <ATen/ops/avg_pool2d_ops.h>
#include <ATen/ops/avg_pool3d_backward_native.h>
#include <ATen/ops/avg_pool3d_backward_ops.h>
#include <ATen/ops/avg_pool3d_backward_native.h>
#include <ATen/ops/avg_pool3d_backward_ops.h>
#include <ATen/ops/fractional_max_pool2d_native.h>
#include <ATen/ops/fractional_max_pool2d_ops.h>
#include <ATen/ops/fractional_max_pool2d_native.h>
#include <ATen/ops/fractional_max_pool2d_ops.h>
#include <ATen/ops/reflection_pad3d_backward_native.h>
#include <ATen/ops/reflection_pad3d_backward_ops.h>
#include <ATen/ops/reflection_pad3d_backward_native.h>
#include <ATen/ops/reflection_pad3d_backward_ops.h>
#include <ATen/ops/replication_pad2d_native.h>
#include <ATen/ops/replication_pad2d_ops.h>
#include <ATen/ops/replication_pad2d_native.h>
#include <ATen/ops/replication_pad2d_ops.h>
#include <ATen/ops/_upsample_bilinear2d_aa_backward_native.h>
#include <ATen/ops/_upsample_bilinear2d_aa_backward_ops.h>
#include <ATen/ops/_upsample_bilinear2d_aa_backward_native.h>
#include <ATen/ops/_upsample_bilinear2d_aa_backward_ops.h>
#include <ATen/ops/_upsample_bicubic2d_aa_native.h>
#include <ATen/ops/_upsample_bicubic2d_aa_ops.h>
#include <ATen/ops/_upsample_bicubic2d_aa_native.h>
#include <ATen/ops/_upsample_bicubic2d_aa_ops.h>
#include <ATen/ops/upsample_trilinear3d_native.h>
#include <ATen/ops/upsample_trilinear3d_ops.h>
#include <ATen/ops/upsample_trilinear3d_native.h>
#include <ATen/ops/upsample_trilinear3d_ops.h>
#include <ATen/ops/_upsample_nearest_exact1d_backward_native.h>
#include <ATen/ops/_upsample_nearest_exact1d_backward_ops.h>
#include <ATen/ops/_upsample_nearest_exact1d_backward_native.h>
#include <ATen/ops/_upsample_nearest_exact1d_backward_ops.h>
#include <ATen/ops/upsample_nearest2d_backward_native.h>
#include <ATen/ops/upsample_nearest2d_backward_ops.h>
#include <ATen/ops/upsample_nearest2d_backward_native.h>
#include <ATen/ops/upsample_nearest2d_backward_ops.h>
#include <ATen/ops/tanh_backward_native.h>
#include <ATen/ops/tanh_backward_ops.h>
#include <ATen/ops/tanh_backward_native.h>
#include <ATen/ops/tanh_backward_ops.h>
#include <ATen/ops/_conv_depthwise2d_native.h>
#include <ATen/ops/_conv_depthwise2d_ops.h>
#include <ATen/ops/_conv_depthwise2d_native.h>
#include <ATen/ops/_conv_depthwise2d_ops.h>
#include <ATen/ops/col2im_native.h>
#include <ATen/ops/col2im_ops.h>
#include <ATen/ops/col2im_native.h>
#include <ATen/ops/col2im_ops.h>
#include <ATen/ops/column_stack_native.h>
#include <ATen/ops/column_stack_ops.h>
#include <ATen/ops/column_stack_native.h>
#include <ATen/ops/column_stack_ops.h>
#include <ATen/ops/im2col_native.h>
#include <ATen/ops/im2col_ops.h>
#include <ATen/ops/im2col_native.h>
#include <ATen/ops/im2col_ops.h>
#include <ATen/ops/isinf_native.h>
#include <ATen/ops/isinf_ops.h>
#include <ATen/ops/isinf_native.h>
#include <ATen/ops/isinf_ops.h>
#include <ATen/ops/isneginf_native.h>
#include <ATen/ops/isneginf_ops.h>
#include <ATen/ops/isneginf_native.h>
#include <ATen/ops/isneginf_ops.h>
#include <ATen/ops/special_expm1_native.h>
#include <ATen/ops/special_expm1_ops.h>
#include <ATen/ops/special_expm1_native.h>
#include <ATen/ops/special_expm1_ops.h>
#include <ATen/ops/special_erf_native.h>
#include <ATen/ops/special_erf_ops.h>
#include <ATen/ops/special_erf_native.h>
#include <ATen/ops/special_erf_ops.h>
#include <ATen/ops/special_logsumexp_native.h>
#include <ATen/ops/special_logsumexp_ops.h>
#include <ATen/ops/special_logsumexp_native.h>
#include <ATen/ops/special_logsumexp_ops.h>
#include <ATen/ops/special_log1p_native.h>
#include <ATen/ops/special_log1p_ops.h>
#include <ATen/ops/special_log1p_native.h>
#include <ATen/ops/special_log1p_ops.h>
#include <ATen/ops/special_gammaincc_native.h>
#include <ATen/ops/special_gammaincc_ops.h>
#include <ATen/ops/special_gammaincc_native.h>
#include <ATen/ops/special_gammaincc_ops.h>
#include <ATen/ops/special_multigammaln_native.h>
#include <ATen/ops/special_multigammaln_ops.h>
#include <ATen/ops/special_multigammaln_native.h>
#include <ATen/ops/special_multigammaln_ops.h>
#include <ATen/ops/fft_rfft2_native.h>
#include <ATen/ops/fft_rfft2_ops.h>
#include <ATen/ops/fft_rfft2_native.h>
#include <ATen/ops/fft_rfft2_ops.h>
#include <ATen/ops/fft_irfft2_native.h>
#include <ATen/ops/fft_irfft2_ops.h>
#include <ATen/ops/fft_irfft2_native.h>
#include <ATen/ops/fft_irfft2_ops.h>
#include <ATen/ops/fft_ihfft2_native.h>
#include <ATen/ops/fft_ihfft2_ops.h>
#include <ATen/ops/fft_ihfft2_native.h>
#include <ATen/ops/fft_ihfft2_ops.h>
#include <ATen/ops/fft_fftn_native.h>
#include <ATen/ops/fft_fftn_ops.h>
#include <ATen/ops/fft_fftn_native.h>
#include <ATen/ops/fft_fftn_ops.h>
#include <ATen/ops/fft_irfftn_native.h>
#include <ATen/ops/fft_irfftn_ops.h>
#include <ATen/ops/fft_irfftn_native.h>
#include <ATen/ops/fft_irfftn_ops.h>
#include <ATen/ops/linalg_lu_factor_native.h>
#include <ATen/ops/linalg_lu_factor_ops.h>
#include <ATen/ops/linalg_lu_factor_native.h>
#include <ATen/ops/linalg_lu_factor_ops.h>
#include <ATen/ops/linalg_lu_factor_ex_native.h>
#include <ATen/ops/linalg_lu_factor_ex_ops.h>
#include <ATen/ops/linalg_lu_factor_ex_native.h>
#include <ATen/ops/linalg_lu_factor_ex_ops.h>
#include <ATen/ops/linalg_ldl_factor_native.h>
#include <ATen/ops/linalg_ldl_factor_ops.h>
#include <ATen/ops/linalg_ldl_factor_native.h>
#include <ATen/ops/linalg_ldl_factor_ops.h>
#include <ATen/ops/linalg_ldl_solve_native.h>
#include <ATen/ops/linalg_ldl_solve_ops.h>
#include <ATen/ops/linalg_ldl_solve_native.h>
#include <ATen/ops/linalg_ldl_solve_ops.h>
#include <ATen/ops/_linalg_slogdet_native.h>
#include <ATen/ops/_linalg_slogdet_ops.h>
#include <ATen/ops/_linalg_slogdet_native.h>
#include <ATen/ops/_linalg_slogdet_ops.h>
#include <ATen/ops/linalg_eigvals_native.h>
#include <ATen/ops/linalg_eigvals_ops.h>
#include <ATen/ops/linalg_eigvals_native.h>
#include <ATen/ops/linalg_eigvals_ops.h>
#include <ATen/ops/linalg_eigh_native.h>
#include <ATen/ops/linalg_eigh_ops.h>
#include <ATen/ops/linalg_eigh_native.h>
#include <ATen/ops/linalg_eigh_ops.h>
#include <ATen/ops/linalg_householder_product_native.h>
#include <ATen/ops/linalg_householder_product_ops.h>
#include <ATen/ops/linalg_householder_product_native.h>
#include <ATen/ops/linalg_householder_product_ops.h>
#include <ATen/ops/ger_native.h>
#include <ATen/ops/ger_ops.h>
#include <ATen/ops/ger_native.h>
#include <ATen/ops/ger_ops.h>
#include <ATen/ops/linalg_norm_native.h>
#include <ATen/ops/linalg_norm_ops.h>
#include <ATen/ops/linalg_norm_native.h>
#include <ATen/ops/linalg_norm_ops.h>
#include <ATen/ops/linalg_norm_native.h>
#include <ATen/ops/linalg_norm_ops.h>
#include <ATen/ops/linalg_norm_native.h>
#include <ATen/ops/linalg_norm_ops.h>
#include <ATen/ops/linalg_vector_norm_native.h>
#include <ATen/ops/linalg_vector_norm_ops.h>
#include <ATen/ops/linalg_vector_norm_native.h>
#include <ATen/ops/linalg_vector_norm_ops.h>
1021 | #include <ATen/ops/_linalg_solve_ex_native.h> |
1022 | #include <ATen/ops/_linalg_solve_ex_ops.h> |
1023 | #include <ATen/ops/_linalg_solve_ex_native.h> |
1024 | #include <ATen/ops/_linalg_solve_ex_ops.h> |
1025 | #include <ATen/ops/linalg_solve_native.h> |
1026 | #include <ATen/ops/linalg_solve_ops.h> |
1027 | #include <ATen/ops/linalg_solve_native.h> |
1028 | #include <ATen/ops/linalg_solve_ops.h> |
1029 | #include <ATen/ops/linalg_multi_dot_native.h> |
1030 | #include <ATen/ops/linalg_multi_dot_ops.h> |
1031 | #include <ATen/ops/linalg_multi_dot_native.h> |
1032 | #include <ATen/ops/linalg_multi_dot_ops.h> |
1033 | #include <ATen/ops/_test_optional_filled_intlist_native.h> |
1034 | #include <ATen/ops/_test_optional_filled_intlist_ops.h> |
1035 | #include <ATen/ops/_test_optional_filled_intlist_native.h> |
1036 | #include <ATen/ops/_test_optional_filled_intlist_ops.h> |
1037 | #include <ATen/ops/_test_autograd_multiple_dispatch_native.h> |
1038 | #include <ATen/ops/_test_autograd_multiple_dispatch_ops.h> |
1039 | #include <ATen/ops/_test_autograd_multiple_dispatch_native.h> |
1040 | #include <ATen/ops/_test_autograd_multiple_dispatch_ops.h> |
1041 | #include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_native.h> |
1042 | #include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_ops.h> |
1043 | #include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_native.h> |
1044 | #include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_ops.h> |
1045 | #include <ATen/ops/segment_reduce_native.h> |
1046 | #include <ATen/ops/segment_reduce_ops.h> |
1047 | #include <ATen/ops/segment_reduce_native.h> |
1048 | #include <ATen/ops/segment_reduce_ops.h> |
1049 | #include <ATen/ops/_nested_tensor_from_tensor_list_native.h> |
1050 | #include <ATen/ops/_nested_tensor_from_tensor_list_ops.h> |
1051 | #include <ATen/ops/_nested_tensor_from_tensor_list_native.h> |
1052 | #include <ATen/ops/_nested_tensor_from_tensor_list_ops.h> |
1053 | #include <ATen/ops/diagonal_copy_native.h> |
1054 | #include <ATen/ops/diagonal_copy_ops.h> |
1055 | #include <ATen/ops/diagonal_copy_native.h> |
1056 | #include <ATen/ops/diagonal_copy_ops.h> |
1057 | #include <ATen/ops/detach_copy_native.h> |
1058 | #include <ATen/ops/detach_copy_ops.h> |
1059 | #include <ATen/ops/detach_copy_native.h> |
1060 | #include <ATen/ops/detach_copy_ops.h> |
1061 | #include <ATen/ops/slice_copy_native.h> |
1062 | #include <ATen/ops/slice_copy_ops.h> |
1063 | #include <ATen/ops/slice_copy_native.h> |
1064 | #include <ATen/ops/slice_copy_ops.h> |
1065 | #include <ATen/ops/transpose_copy_native.h> |
1066 | #include <ATen/ops/transpose_copy_ops.h> |
1067 | #include <ATen/ops/transpose_copy_native.h> |
1068 | #include <ATen/ops/transpose_copy_ops.h> |
1069 | #include <ATen/ops/indices_copy_native.h> |
1070 | #include <ATen/ops/indices_copy_ops.h> |
1071 | #include <ATen/ops/indices_copy_native.h> |
1072 | #include <ATen/ops/indices_copy_ops.h> |
1073 | #include <ATen/ops/row_indices_copy_native.h> |
1074 | #include <ATen/ops/row_indices_copy_ops.h> |
1075 | #include <ATen/ops/row_indices_copy_native.h> |
1076 | #include <ATen/ops/row_indices_copy_ops.h> |
1077 | #include <ATen/ops/_triton_multi_head_attention_native.h> |
1078 | #include <ATen/ops/_triton_multi_head_attention_ops.h> |
1079 | #include <ATen/ops/_triton_multi_head_attention_native.h> |
1080 | #include <ATen/ops/_triton_multi_head_attention_ops.h> |
1081 | #include <ATen/ops/special_bessel_j1_native.h> |
1082 | #include <ATen/ops/special_bessel_j1_ops.h> |
1083 | #include <ATen/ops/special_bessel_j1_native.h> |
1084 | #include <ATen/ops/special_bessel_j1_ops.h> |
1085 | #include <ATen/ops/special_bessel_y1_native.h> |
1086 | #include <ATen/ops/special_bessel_y1_ops.h> |
1087 | #include <ATen/ops/special_bessel_y1_native.h> |
1088 | #include <ATen/ops/special_bessel_y1_ops.h> |
1089 | #include <ATen/ops/special_legendre_polynomial_p_native.h> |
1090 | #include <ATen/ops/special_legendre_polynomial_p_ops.h> |
1091 | #include <ATen/ops/special_legendre_polynomial_p_native.h> |
1092 | #include <ATen/ops/special_legendre_polynomial_p_ops.h> |
1093 | #include <ATen/ops/special_legendre_polynomial_p_native.h> |
1094 | #include <ATen/ops/special_legendre_polynomial_p_ops.h> |
1095 | #include <ATen/ops/special_legendre_polynomial_p_native.h> |
1096 | #include <ATen/ops/special_legendre_polynomial_p_ops.h> |
1097 | #include <ATen/ops/special_legendre_polynomial_p_native.h> |
1098 | #include <ATen/ops/special_legendre_polynomial_p_ops.h> |
1099 | #include <ATen/ops/special_legendre_polynomial_p_native.h> |
1100 | #include <ATen/ops/special_legendre_polynomial_p_ops.h> |
1101 | #include <ATen/ops/special_modified_bessel_i0_native.h> |
1102 | #include <ATen/ops/special_modified_bessel_i0_ops.h> |
1103 | #include <ATen/ops/special_modified_bessel_i0_native.h> |
1104 | #include <ATen/ops/special_modified_bessel_i0_ops.h> |
1105 | #include <ATen/ops/special_shifted_chebyshev_polynomial_t_native.h> |
1106 | #include <ATen/ops/special_shifted_chebyshev_polynomial_t_ops.h> |
1107 | #include <ATen/ops/special_shifted_chebyshev_polynomial_t_native.h> |
1108 | #include <ATen/ops/special_shifted_chebyshev_polynomial_t_ops.h> |
1109 | #include <ATen/ops/special_shifted_chebyshev_polynomial_t_native.h> |
1110 | #include <ATen/ops/special_shifted_chebyshev_polynomial_t_ops.h> |
1111 | #include <ATen/ops/special_shifted_chebyshev_polynomial_t_native.h> |
1112 | #include <ATen/ops/special_shifted_chebyshev_polynomial_t_ops.h> |
1113 | #include <ATen/ops/special_shifted_chebyshev_polynomial_t_native.h> |
1114 | #include <ATen/ops/special_shifted_chebyshev_polynomial_t_ops.h> |
1115 | #include <ATen/ops/special_shifted_chebyshev_polynomial_t_native.h> |
1116 | #include <ATen/ops/special_shifted_chebyshev_polynomial_t_ops.h> |
1117 | #include <ATen/ops/special_shifted_chebyshev_polynomial_u_native.h> |
1118 | #include <ATen/ops/special_shifted_chebyshev_polynomial_u_ops.h> |
1119 | #include <ATen/ops/special_shifted_chebyshev_polynomial_u_native.h> |
1120 | #include <ATen/ops/special_shifted_chebyshev_polynomial_u_ops.h> |
1121 | #include <ATen/ops/special_shifted_chebyshev_polynomial_u_native.h> |
1122 | #include <ATen/ops/special_shifted_chebyshev_polynomial_u_ops.h> |
1123 | #include <ATen/ops/special_shifted_chebyshev_polynomial_u_native.h> |
1124 | #include <ATen/ops/special_shifted_chebyshev_polynomial_u_ops.h> |
1125 | #include <ATen/ops/special_shifted_chebyshev_polynomial_u_native.h> |
1126 | #include <ATen/ops/special_shifted_chebyshev_polynomial_u_ops.h> |
1127 | #include <ATen/ops/special_shifted_chebyshev_polynomial_u_native.h> |
1128 | #include <ATen/ops/special_shifted_chebyshev_polynomial_u_ops.h> |
1129 | #include <ATen/ops/special_shifted_chebyshev_polynomial_w_native.h> |
1130 | #include <ATen/ops/special_shifted_chebyshev_polynomial_w_ops.h> |
1131 | #include <ATen/ops/special_shifted_chebyshev_polynomial_w_native.h> |
1132 | #include <ATen/ops/special_shifted_chebyshev_polynomial_w_ops.h> |
1133 | #include <ATen/ops/special_shifted_chebyshev_polynomial_w_native.h> |
1134 | #include <ATen/ops/special_shifted_chebyshev_polynomial_w_ops.h> |
1135 | #include <ATen/ops/special_shifted_chebyshev_polynomial_w_native.h> |
1136 | #include <ATen/ops/special_shifted_chebyshev_polynomial_w_ops.h> |
1137 | #include <ATen/ops/special_shifted_chebyshev_polynomial_w_native.h> |
1138 | #include <ATen/ops/special_shifted_chebyshev_polynomial_w_ops.h> |
1139 | #include <ATen/ops/special_shifted_chebyshev_polynomial_w_native.h> |
1140 | #include <ATen/ops/special_shifted_chebyshev_polynomial_w_ops.h> |
#include <ATen/ops/_fused_adam_native.h>
#include <ATen/ops/_fused_adam_ops.h>
1147 | #include <ATen/ops/rename_native.h> |
1148 | #include <ATen/ops/rename_ops.h> |
1149 | #include <ATen/ops/imag_native.h> |
1150 | #include <ATen/ops/imag_ops.h> |
1151 | #include <ATen/ops/_conj_native.h> |
1152 | #include <ATen/ops/_conj_ops.h> |
1153 | #include <ATen/ops/_conj_copy_native.h> |
1154 | #include <ATen/ops/_conj_copy_ops.h> |
1155 | #include <ATen/ops/conj_native.h> |
1156 | #include <ATen/ops/conj_ops.h> |
1157 | #include <ATen/ops/resolve_conj_native.h> |
1158 | #include <ATen/ops/resolve_conj_ops.h> |
1159 | #include <ATen/ops/as_strided_native.h> |
1160 | #include <ATen/ops/as_strided_ops.h> |
1161 | #include <ATen/ops/as_strided_copy_native.h> |
1162 | #include <ATen/ops/as_strided_copy_ops.h> |
1163 | #include <ATen/ops/_sparse_broadcast_to_native.h> |
1164 | #include <ATen/ops/_sparse_broadcast_to_ops.h> |
1165 | #include <ATen/ops/_sparse_broadcast_to_copy_native.h> |
1166 | #include <ATen/ops/_sparse_broadcast_to_copy_ops.h> |
1167 | #include <ATen/ops/chunk_native.h> |
1168 | #include <ATen/ops/chunk_ops.h> |
#include <ATen/ops/tensor_split_native.h>
#include <ATen/ops/tensor_split_ops.h>
1175 | #include <ATen/ops/expand_as_native.h> |
1176 | #include <ATen/ops/expand_as_ops.h> |
#include <ATen/ops/unflatten_native.h>
#include <ATen/ops/unflatten_ops.h>
1181 | #include <ATen/ops/permute_native.h> |
1182 | #include <ATen/ops/permute_ops.h> |
1183 | #include <ATen/ops/permute_copy_native.h> |
1184 | #include <ATen/ops/permute_copy_ops.h> |
#include <ATen/ops/movedim_native.h>
#include <ATen/ops/movedim_ops.h>
1189 | #include <ATen/ops/mH_native.h> |
1190 | #include <ATen/ops/mH_ops.h> |
1191 | #include <ATen/ops/pin_memory_native.h> |
1192 | #include <ATen/ops/pin_memory_ops.h> |
1193 | #include <ATen/ops/_reshape_alias_native.h> |
1194 | #include <ATen/ops/_reshape_alias_ops.h> |
1195 | #include <ATen/ops/_reshape_alias_copy_native.h> |
1196 | #include <ATen/ops/_reshape_alias_copy_ops.h> |
1197 | #include <ATen/ops/detach_native.h> |
1198 | #include <ATen/ops/detach_ops.h> |
1199 | #include <ATen/ops/detach_copy_native.h> |
1200 | #include <ATen/ops/detach_copy_ops.h> |
1201 | #include <ATen/ops/split_native.h> |
1202 | #include <ATen/ops/split_ops.h> |
1203 | #include <ATen/ops/split_copy_native.h> |
1204 | #include <ATen/ops/split_copy_ops.h> |
1205 | #include <ATen/ops/split_native.h> |
1206 | #include <ATen/ops/split_ops.h> |
#include <ATen/ops/dsplit_native.h>
#include <ATen/ops/dsplit_ops.h>
1211 | #include <ATen/ops/positive_native.h> |
1212 | #include <ATen/ops/positive_ops.h> |
1213 | #include <ATen/ops/values_native.h> |
1214 | #include <ATen/ops/values_ops.h> |
1215 | #include <ATen/ops/values_copy_native.h> |
1216 | #include <ATen/ops/values_copy_ops.h> |
1217 | #include <ATen/ops/row_indices_native.h> |
1218 | #include <ATen/ops/row_indices_ops.h> |
1219 | #include <ATen/ops/row_indices_copy_native.h> |
1220 | #include <ATen/ops/row_indices_copy_ops.h> |
1221 | #include <ATen/ops/lift_fresh_native.h> |
1222 | #include <ATen/ops/lift_fresh_ops.h> |
1223 | #include <ATen/ops/lift_fresh_copy_native.h> |
1224 | #include <ATen/ops/lift_fresh_copy_ops.h> |
1225 | #include <ATen/ops/swapdims_native.h> |
1226 | #include <ATen/ops/swapdims_ops.h> |
1227 | #include <ATen/ops/_test_autograd_multiple_dispatch_view_native.h> |
1228 | #include <ATen/ops/_test_autograd_multiple_dispatch_view_ops.h> |
1229 | #include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_native.h> |
1230 | #include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_ops.h> |
1231 | #include <ATen/ops/_cast_Double_native.h> |
1232 | #include <ATen/ops/_cast_Double_ops.h> |
1233 | #include <ATen/ops/_cast_Int_native.h> |
1234 | #include <ATen/ops/_cast_Int_ops.h> |
1235 | #include <ATen/ops/_cast_Short_native.h> |
1236 | #include <ATen/ops/_cast_Short_ops.h> |
1237 | #include <ATen/ops/output_nr_native.h> |
1238 | #include <ATen/ops/output_nr_ops.h> |
1239 | #include <ATen/ops/_cudnn_ctc_loss_native.h> |
1240 | #include <ATen/ops/_cudnn_ctc_loss_ops.h> |
1241 | #include <ATen/ops/_use_cudnn_rnn_flatten_weight_native.h> |
1242 | #include <ATen/ops/_use_cudnn_rnn_flatten_weight_ops.h> |
1243 | #include <ATen/ops/_sobol_engine_initialize_state_native.h> |
1244 | #include <ATen/ops/_sobol_engine_initialize_state_ops.h> |
1245 | #include <ATen/ops/_shape_as_tensor_native.h> |
1246 | #include <ATen/ops/_shape_as_tensor_ops.h> |
1247 | #include <ATen/ops/_dim_arange_native.h> |
1248 | #include <ATen/ops/_dim_arange_ops.h> |
1249 | #include <ATen/ops/cudnn_is_acceptable_native.h> |
1250 | #include <ATen/ops/cudnn_is_acceptable_ops.h> |
1251 | #include <ATen/ops/_convolution_native.h> |
1252 | #include <ATen/ops/_convolution_ops.h> |
#include <ATen/ops/conv2d_native.h>
#include <ATen/ops/conv2d_ops.h>
1257 | #include <ATen/ops/conv_tbc_backward_native.h> |
1258 | #include <ATen/ops/conv_tbc_backward_ops.h> |
1259 | #include <ATen/ops/cov_native.h> |
1260 | #include <ATen/ops/cov_ops.h> |
1261 | #include <ATen/ops/_cummin_helper_native.h> |
1262 | #include <ATen/ops/_cummin_helper_ops.h> |
#include <ATen/ops/divide_native.h>
#include <ATen/ops/divide_ops.h>
#include <ATen/ops/true_divide_native.h>
#include <ATen/ops/true_divide_ops.h>
1275 | #include <ATen/ops/embedding_backward_native.h> |
1276 | #include <ATen/ops/embedding_backward_ops.h> |
#include <ATen/ops/embedding_bag_native.h>
#include <ATen/ops/embedding_bag_ops.h>
1281 | #include <ATen/ops/_embedding_bag_backward_native.h> |
1282 | #include <ATen/ops/_embedding_bag_backward_ops.h> |
1283 | #include <ATen/ops/_cufft_get_plan_cache_size_native.h> |
1284 | #include <ATen/ops/_cufft_get_plan_cache_size_ops.h> |
1285 | #include <ATen/ops/_cufft_get_plan_cache_max_size_native.h> |
1286 | #include <ATen/ops/_cufft_get_plan_cache_max_size_ops.h> |
1287 | #include <ATen/ops/_is_zerotensor_native.h> |
1288 | #include <ATen/ops/_is_zerotensor_ops.h> |
1289 | #include <ATen/ops/is_inference_native.h> |
1290 | #include <ATen/ops/is_inference_ops.h> |
1291 | #include <ATen/ops/kl_div_native.h> |
1292 | #include <ATen/ops/kl_div_ops.h> |
1293 | #include <ATen/ops/margin_ranking_loss_native.h> |
1294 | #include <ATen/ops/margin_ranking_loss_ops.h> |
1295 | #include <ATen/ops/matrix_exp_native.h> |
1296 | #include <ATen/ops/matrix_exp_ops.h> |
1297 | #include <ATen/ops/miopen_convolution_relu_native.h> |
1298 | #include <ATen/ops/miopen_convolution_relu_ops.h> |
#include <ATen/ops/_sparse_mm_native.h>
#include <ATen/ops/_sparse_mm_ops.h>
1303 | #include <ATen/ops/_nnpack_available_native.h> |
1304 | #include <ATen/ops/_nnpack_available_ops.h> |
1305 | #include <ATen/ops/_reshape_copy_native.h> |
1306 | #include <ATen/ops/_reshape_copy_ops.h> |
#include <ATen/ops/relu6_native.h>
#include <ATen/ops/relu6_ops.h>
1311 | #include <ATen/ops/_prelu_kernel_backward_native.h> |
1312 | #include <ATen/ops/_prelu_kernel_backward_ops.h> |
#include <ATen/ops/selu_native.h>
#include <ATen/ops/selu_ops.h>
1317 | #include <ATen/ops/istft_native.h> |
1318 | #include <ATen/ops/istft_ops.h> |
1319 | #include <ATen/ops/_nested_sum_backward_native.h> |
1320 | #include <ATen/ops/_nested_sum_backward_ops.h> |
#include <ATen/ops/std_mean_native.h>
#include <ATen/ops/std_mean_ops.h>
1329 | #include <ATen/ops/flipud_native.h> |
1330 | #include <ATen/ops/flipud_ops.h> |
1331 | #include <ATen/ops/_nested_tensor_from_mask_left_aligned_native.h> |
1332 | #include <ATen/ops/_nested_tensor_from_mask_left_aligned_ops.h> |
#include <ATen/ops/var_mean_native.h>
#include <ATen/ops/var_mean_ops.h>
1341 | #include <ATen/ops/norm_except_dim_native.h> |
1342 | #include <ATen/ops/norm_except_dim_ops.h> |
1343 | #include <ATen/ops/_sparse_compressed_tensor_unsafe_native.h> |
1344 | #include <ATen/ops/_sparse_compressed_tensor_unsafe_ops.h> |
#include <ATen/ops/sparse_coo_tensor_native.h>
#include <ATen/ops/sparse_coo_tensor_ops.h>
1349 | #include <ATen/ops/_validate_sparse_bsc_tensor_args_native.h> |
1350 | #include <ATen/ops/_validate_sparse_bsc_tensor_args_ops.h> |
1351 | #include <ATen/ops/dense_dim_native.h> |
1352 | #include <ATen/ops/dense_dim_ops.h> |
1353 | #include <ATen/ops/_dimV_native.h> |
1354 | #include <ATen/ops/_dimV_ops.h> |
1355 | #include <ATen/ops/to_mkldnn_backward_native.h> |
1356 | #include <ATen/ops/to_mkldnn_backward_ops.h> |
1357 | #include <ATen/ops/q_zero_point_native.h> |
1358 | #include <ATen/ops/q_zero_point_ops.h> |
1359 | #include <ATen/ops/qscheme_native.h> |
1360 | #include <ATen/ops/qscheme_ops.h> |
#include <ATen/ops/fake_quantize_per_tensor_affine_native.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_ops.h>
1365 | #include <ATen/ops/fused_moving_avg_obs_fake_quant_native.h> |
1366 | #include <ATen/ops/fused_moving_avg_obs_fake_quant_ops.h> |
1367 | #include <ATen/ops/_choose_qparams_per_tensor_native.h> |
1368 | #include <ATen/ops/_choose_qparams_per_tensor_ops.h> |
#include <ATen/ops/meshgrid_native.h>
#include <ATen/ops/meshgrid_ops.h>
1373 | #include <ATen/ops/cartesian_prod_native.h> |
1374 | #include <ATen/ops/cartesian_prod_ops.h> |
1375 | #include <ATen/ops/can_cast_native.h> |
1376 | #include <ATen/ops/can_cast_ops.h> |
1377 | #include <ATen/ops/promote_types_native.h> |
1378 | #include <ATen/ops/promote_types_ops.h> |
1379 | #include <ATen/ops/_local_scalar_dense_native.h> |
1380 | #include <ATen/ops/_local_scalar_dense_ops.h> |
#include <ATen/ops/rnn_tanh_native.h>
#include <ATen/ops/rnn_tanh_ops.h>
1385 | #include <ATen/ops/gru_cell_native.h> |
1386 | #include <ATen/ops/gru_cell_ops.h> |
1387 | #include <ATen/ops/rnn_relu_cell_native.h> |
1388 | #include <ATen/ops/rnn_relu_cell_ops.h> |
1389 | #include <ATen/ops/_pad_packed_sequence_native.h> |
1390 | #include <ATen/ops/_pad_packed_sequence_ops.h> |
1391 | #include <ATen/ops/is_set_to_native.h> |
1392 | #include <ATen/ops/is_set_to_ops.h> |
1393 | #include <ATen/ops/gather_backward_native.h> |
1394 | #include <ATen/ops/gather_backward_ops.h> |
1395 | #include <ATen/ops/_gather_sparse_backward_native.h> |
1396 | #include <ATen/ops/_gather_sparse_backward_ops.h> |
1397 | #include <ATen/ops/linalg_vander_native.h> |
1398 | #include <ATen/ops/linalg_vander_ops.h> |
#include <ATen/ops/argsort_native.h>
#include <ATen/ops/argsort_ops.h>
1403 | #include <ATen/ops/_pad_circular_native.h> |
1404 | #include <ATen/ops/_pad_circular_ops.h> |
1405 | #include <ATen/ops/upsample_trilinear3d_native.h> |
1406 | #include <ATen/ops/upsample_trilinear3d_ops.h> |
1407 | #include <ATen/ops/_upsample_bicubic2d_aa_native.h> |
1408 | #include <ATen/ops/_upsample_bicubic2d_aa_ops.h> |
1409 | #include <ATen/ops/record_stream_native.h> |
1410 | #include <ATen/ops/record_stream_ops.h> |
1411 | #include <ATen/ops/special_softmax_native.h> |
1412 | #include <ATen/ops/special_softmax_ops.h> |
1413 | #include <ATen/ops/fft_fftshift_native.h> |
1414 | #include <ATen/ops/fft_fftshift_ops.h> |
1415 | #include <ATen/ops/nested_to_padded_tensor_native.h> |
1416 | #include <ATen/ops/nested_to_padded_tensor_ops.h> |
#include <ATen/ops/_test_ambiguous_defaults_native.h>
#include <ATen/ops/_test_ambiguous_defaults_ops.h>
1421 | #include <ATen/ops/_test_autograd_multiple_dispatch_native.h> |
1422 | #include <ATen/ops/_test_autograd_multiple_dispatch_ops.h> |
1423 | #include <ATen/ops/pad_sequence_native.h> |
1424 | #include <ATen/ops/pad_sequence_ops.h> |
1425 | #include <ATen/ops/flatten_dense_tensors_native.h> |
1426 | #include <ATen/ops/flatten_dense_tensors_ops.h> |
1427 | #include <ATen/ops/_scaled_dot_product_attention_native.h> |
1428 | #include <ATen/ops/_scaled_dot_product_attention_ops.h> |
1429 | #include <ATen/ops/_scaled_dot_product_efficient_attention_backward_native.h> |
1430 | #include <ATen/ops/_scaled_dot_product_efficient_attention_backward_ops.h> |
1431 | #include <ATen/ops/_chunk_grad_outputs_efficient_attention_native.h> |
1432 | #include <ATen/ops/_chunk_grad_outputs_efficient_attention_ops.h> |
1433 | #endif |
1434 | |
1435 | namespace at { |
1436 | namespace functionalization { |
1437 | |
1438 | // This keyset is used by functionalization when it calls into meta kernels |
1439 | // to accurately propagate stride metadata. |
// Exclude any modes: calling into meta kernels here is purely an implementation
// detail used to perform shape inference, and we don't want any mode keys to run.
1442 | // Specifically, we want to prevent functionalization and Python modes from running. |
1443 | constexpr auto exclude_keys_for_meta_dispatch = |
1444 | c10::functorch_transforms_ks | |
1445 | c10::DispatchKeySet({ |
1446 | c10::DispatchKey::FuncTorchDynamicLayerBackMode, |
1447 | c10::DispatchKey::FuncTorchDynamicLayerFrontMode, |
1448 | c10::DispatchKey::Python |
1449 | }); |
1450 | |
1451 | |
1452 | inline Tensor to_meta(const Tensor& t) { |
1453 | if (!t.defined()) return t; |
1454 | return at::native::empty_strided_meta_symint(t.sym_sizes(), t.sym_strides(), |
1455 | /*dtype=*/c10::make_optional(t.scalar_type()), /*layout=*/c10::make_optional(t.layout()), |
1456 | /*device=*/c10::make_optional(c10::Device(kMeta)), /*pin_memory=*/c10::nullopt); |
1457 | } |
1458 | |
1459 | inline c10::optional<Tensor> to_meta(const c10::optional<Tensor>& t) { |
1460 | if (t.has_value()) { |
1461 | return c10::make_optional<Tensor>(to_meta(*t)); |
1462 | } |
1463 | return c10::nullopt; |
1464 | } |
1465 | |
1466 | inline std::vector<Tensor> to_meta(at::ITensorListRef t_list) { |
1467 | std::vector<Tensor> outputs; |
1468 | outputs.reserve(t_list.size()); |
1469 | for (const auto& tensor : t_list) { |
1470 | outputs.push_back(to_meta(tensor)); |
1471 | } |
1472 | return outputs; |
1473 | } |
1474 | |
1475 | inline c10::List<Tensor> to_meta(const c10::List<Tensor>& t_list) { |
1476 | c10::List<Tensor> outputs; |
1477 | outputs.reserve(t_list.size()); |
1478 | for (const auto i : c10::irange(t_list.size())) { |
1479 | outputs.push_back(to_meta(t_list[i])); |
1480 | } |
1481 | return outputs; |
1482 | } |
1483 | |
1484 | inline c10::List<c10::optional<Tensor>> to_meta(const c10::List<c10::optional<Tensor>>& t_list) { |
1485 | c10::List<c10::optional<Tensor>> outputs; |
1486 | outputs.reserve(t_list.size()); |
1487 | for (const auto i : c10::irange(t_list.size())) { |
1488 | outputs.push_back(to_meta(t_list[i])); |
1489 | } |
1490 | return outputs; |
1491 | } |
1492 | |
1493 | |
1494 | |
1495 | ::std::tuple<at::Tensor &,at::Tensor &> _cudnn_ctc_loss_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) { |
1496 | if (false) { |
1497 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
// This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
1500 | auto log_probs_meta = to_meta(log_probs); |
1501 | auto targets_meta = to_meta(targets); |
1502 | auto out0_meta = to_meta(out0); |
1503 | auto out1_meta = to_meta(out1); |
1504 | at::AutoDispatchSkipFunctionalize func_guard; |
1505 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
1506 | at::_ops::_cudnn_ctc_loss_out::call(log_probs_meta, targets_meta, input_lengths, target_lengths, blank, deterministic, zero_infinity, out0_meta, out1_meta); |
1507 | } |
1508 | |
1509 | at::Tensor log_probs_; |
1510 | if (at::functionalization::impl::isFunctionalTensor(log_probs)) { |
1511 | at::functionalization::impl::sync(log_probs); |
1512 | log_probs_ = at::functionalization::impl::from_functional_tensor(log_probs); |
1513 | } else { |
1514 | log_probs_ = log_probs; |
1515 | } |
1516 | |
1517 | at::Tensor targets_; |
1518 | if (at::functionalization::impl::isFunctionalTensor(targets)) { |
1519 | at::functionalization::impl::sync(targets); |
1520 | targets_ = at::functionalization::impl::from_functional_tensor(targets); |
1521 | } else { |
1522 | targets_ = targets; |
1523 | } |
1524 | |
1525 | at::Tensor out0_; |
1526 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
1527 | at::functionalization::impl::sync(out0); |
1528 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
1529 | } else { |
1530 | out0_ = out0; |
1531 | } |
1532 | |
1533 | at::Tensor out1_; |
1534 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
1535 | at::functionalization::impl::sync(out1); |
1536 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
1537 | } else { |
1538 | out1_ = out1; |
1539 | } |
1540 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) { |
1541 | if ((false || at::functionalization::impl::isFunctionalTensor(log_probs) || at::functionalization::impl::isFunctionalTensor(targets))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
1543 | TORCH_INTERNAL_ASSERT(false, |
1544 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
1545 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
1546 | } else { |
1547 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
1548 | at::AutoDispatchSkipFunctionalize guard; |
1549 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_cudnn_ctc_loss_out::call(log_probs_, targets_, input_lengths, target_lengths, blank, deterministic, zero_infinity, out0_, out1_); |
return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
1551 | } |
1552 | } else { |
1553 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
1554 | { |
1555 | at::AutoDispatchSkipFunctionalize guard; |
1556 | tmp_output = at::_ops::_cudnn_ctc_loss::call(log_probs_, targets_, input_lengths, target_lengths, blank, deterministic, zero_infinity); |
1557 | } |
1558 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
1559 | at::functionalization::impl::commit_update(out0); |
1560 | at::functionalization::impl::sync(out0); |
1561 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
1562 | at::functionalization::impl::commit_update(out1); |
1563 | at::functionalization::impl::sync(out1); |
1564 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
1565 | } |
1566 | } |
1567 | |
1568 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _cudnn_rnn_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) { |
1569 | if (false) { |
1570 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
// This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
1573 | auto input_meta = to_meta(input); |
1574 | auto weight_meta = to_meta(weight); |
1575 | auto weight_buf_meta = to_meta(weight_buf); |
1576 | auto hx_meta = to_meta(hx); |
1577 | auto cx_meta = to_meta(cx); |
1578 | auto dropout_state_meta = to_meta(dropout_state); |
1579 | auto out0_meta = to_meta(out0); |
1580 | auto out1_meta = to_meta(out1); |
1581 | auto out2_meta = to_meta(out2); |
1582 | auto out3_meta = to_meta(out3); |
1583 | auto out4_meta = to_meta(out4); |
1584 | at::AutoDispatchSkipFunctionalize func_guard; |
1585 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
1586 | at::_ops::_cudnn_rnn_out::call(input_meta, weight_meta, weight_stride0, weight_buf_meta, hx_meta, cx_meta, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_meta, out0_meta, out1_meta, out2_meta, out3_meta, out4_meta); |
1587 | } |
1588 | |
1589 | at::Tensor input_; |
1590 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
1591 | at::functionalization::impl::sync(input); |
1592 | input_ = at::functionalization::impl::from_functional_tensor(input); |
1593 | } else { |
1594 | input_ = input; |
1595 | } |
1596 | |
1597 | ::std::vector<at::Tensor> weight_; |
1598 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
1599 | at::functionalization::impl::sync(weight); |
1600 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
1601 | } else { |
1602 | weight_ = weight.vec(); |
1603 | } |
1604 | |
1605 | c10::optional<at::Tensor> weight_buf_; |
1606 | if (at::functionalization::impl::isFunctionalTensor(weight_buf)) { |
1607 | at::functionalization::impl::sync(weight_buf); |
1608 | weight_buf_ = at::functionalization::impl::from_functional_tensor(weight_buf); |
1609 | } else { |
1610 | weight_buf_ = weight_buf; |
1611 | } |
1612 | |
1613 | at::Tensor hx_; |
1614 | if (at::functionalization::impl::isFunctionalTensor(hx)) { |
1615 | at::functionalization::impl::sync(hx); |
1616 | hx_ = at::functionalization::impl::from_functional_tensor(hx); |
1617 | } else { |
1618 | hx_ = hx; |
1619 | } |
1620 | |
1621 | c10::optional<at::Tensor> cx_; |
1622 | if (at::functionalization::impl::isFunctionalTensor(cx)) { |
1623 | at::functionalization::impl::sync(cx); |
1624 | cx_ = at::functionalization::impl::from_functional_tensor(cx); |
1625 | } else { |
1626 | cx_ = cx; |
1627 | } |
1628 | |
1629 | c10::optional<at::Tensor> dropout_state_; |
1630 | if (at::functionalization::impl::isFunctionalTensor(dropout_state)) { |
1631 | at::functionalization::impl::sync(dropout_state); |
1632 | dropout_state_ = at::functionalization::impl::from_functional_tensor(dropout_state); |
1633 | } else { |
1634 | dropout_state_ = dropout_state; |
1635 | } |
1636 | |
1637 | at::Tensor out0_; |
1638 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
1639 | at::functionalization::impl::sync(out0); |
1640 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
1641 | } else { |
1642 | out0_ = out0; |
1643 | } |
1644 | |
1645 | at::Tensor out1_; |
1646 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
1647 | at::functionalization::impl::sync(out1); |
1648 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
1649 | } else { |
1650 | out1_ = out1; |
1651 | } |
1652 | |
1653 | at::Tensor out2_; |
1654 | if (at::functionalization::impl::isFunctionalTensor(out2)) { |
1655 | at::functionalization::impl::sync(out2); |
1656 | out2_ = at::functionalization::impl::from_functional_tensor(out2); |
1657 | } else { |
1658 | out2_ = out2; |
1659 | } |
1660 | |
1661 | at::Tensor out3_; |
1662 | if (at::functionalization::impl::isFunctionalTensor(out3)) { |
1663 | at::functionalization::impl::sync(out3); |
1664 | out3_ = at::functionalization::impl::from_functional_tensor(out3); |
1665 | } else { |
1666 | out3_ = out3; |
1667 | } |
1668 | |
1669 | at::Tensor out4_; |
1670 | if (at::functionalization::impl::isFunctionalTensor(out4)) { |
1671 | at::functionalization::impl::sync(out4); |
1672 | out4_ = at::functionalization::impl::from_functional_tensor(out4); |
1673 | } else { |
1674 | out4_ = out4; |
1675 | } |
1676 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2) && at::functionalization::impl::isFunctionalTensor(out3) && at::functionalization::impl::isFunctionalTensor(out4))) { |
1677 | if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(weight_buf) || at::functionalization::impl::isFunctionalTensor(hx) || at::functionalization::impl::isFunctionalTensor(cx) || at::functionalization::impl::isFunctionalTensor(dropout_state))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
1679 | TORCH_INTERNAL_ASSERT(false, |
1680 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
1681 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
1682 | } else { |
1683 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
1684 | at::AutoDispatchSkipFunctionalize guard; |
1685 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_cudnn_rnn_out::call(input_, weight_, weight_stride0, weight_buf_, hx_, cx_, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_, out0_, out1_, out2_, out3_, out4_); |
return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3, out4);
1687 | } |
1688 | } else { |
1689 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output; |
1690 | { |
1691 | at::AutoDispatchSkipFunctionalize guard; |
1692 | tmp_output = at::_ops::_cudnn_rnn::call(input_, weight_, weight_stride0, weight_buf_, hx_, cx_, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_); |
1693 | } |
1694 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
1695 | at::functionalization::impl::commit_update(out0); |
1696 | at::functionalization::impl::sync(out0); |
1697 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
1698 | at::functionalization::impl::commit_update(out1); |
1699 | at::functionalization::impl::sync(out1); |
1700 | at::functionalization::impl::replace_(out2, std::get<2>(tmp_output)); |
1701 | at::functionalization::impl::commit_update(out2); |
1702 | at::functionalization::impl::sync(out2); |
1703 | at::functionalization::impl::replace_(out3, std::get<3>(tmp_output)); |
1704 | at::functionalization::impl::commit_update(out3); |
1705 | at::functionalization::impl::sync(out3); |
1706 | at::functionalization::impl::replace_(out4, std::get<4>(tmp_output)); |
1707 | at::functionalization::impl::commit_update(out4); |
1708 | at::functionalization::impl::sync(out4); |
1709 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3, out4); |
1710 | } |
1711 | } |
1712 | |
1713 | void _cudnn_rnn_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) { |
1714 | if (false) { |
1715 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
// This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
1718 | auto input_meta = to_meta(input); |
1719 | auto weight_meta = to_meta(weight); |
1720 | auto weight_buf_meta = to_meta(weight_buf); |
1721 | auto hx_meta = to_meta(hx); |
1722 | auto cx_meta = to_meta(cx); |
1723 | auto output_meta = to_meta(output); |
1724 | auto grad_output_meta = to_meta(grad_output); |
1725 | auto grad_hy_meta = to_meta(grad_hy); |
1726 | auto grad_cy_meta = to_meta(grad_cy); |
1727 | auto dropout_state_meta = to_meta(dropout_state); |
1728 | auto reserve_meta = to_meta(reserve); |
1729 | auto out0_meta = to_meta(out0); |
1730 | auto out1_meta = to_meta(out1); |
1731 | auto out2_meta = to_meta(out2); |
1732 | auto out3_meta = to_meta(out3); |
1733 | at::AutoDispatchSkipFunctionalize func_guard; |
1734 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
1735 | at::_ops::_cudnn_rnn_backward_out::call(input_meta, weight_meta, weight_stride0, weight_buf_meta, hx_meta, cx_meta, output_meta, grad_output_meta, grad_hy_meta, grad_cy_meta, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_meta, reserve_meta, output_mask, out0_meta, out1_meta, out2_meta, out3_meta); |
1736 | } |
1737 | |
1738 | at::Tensor input_; |
1739 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
1740 | at::functionalization::impl::sync(input); |
1741 | input_ = at::functionalization::impl::from_functional_tensor(input); |
1742 | } else { |
1743 | input_ = input; |
1744 | } |
1745 | |
1746 | ::std::vector<at::Tensor> weight_; |
1747 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
1748 | at::functionalization::impl::sync(weight); |
1749 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
1750 | } else { |
1751 | weight_ = weight.vec(); |
1752 | } |
1753 | |
1754 | at::Tensor weight_buf_; |
1755 | if (at::functionalization::impl::isFunctionalTensor(weight_buf)) { |
1756 | at::functionalization::impl::sync(weight_buf); |
1757 | weight_buf_ = at::functionalization::impl::from_functional_tensor(weight_buf); |
1758 | } else { |
1759 | weight_buf_ = weight_buf; |
1760 | } |
1761 | |
1762 | at::Tensor hx_; |
1763 | if (at::functionalization::impl::isFunctionalTensor(hx)) { |
1764 | at::functionalization::impl::sync(hx); |
1765 | hx_ = at::functionalization::impl::from_functional_tensor(hx); |
1766 | } else { |
1767 | hx_ = hx; |
1768 | } |
1769 | |
1770 | c10::optional<at::Tensor> cx_; |
1771 | if (at::functionalization::impl::isFunctionalTensor(cx)) { |
1772 | at::functionalization::impl::sync(cx); |
1773 | cx_ = at::functionalization::impl::from_functional_tensor(cx); |
1774 | } else { |
1775 | cx_ = cx; |
1776 | } |
1777 | |
1778 | at::Tensor output_; |
1779 | if (at::functionalization::impl::isFunctionalTensor(output)) { |
1780 | at::functionalization::impl::sync(output); |
1781 | output_ = at::functionalization::impl::from_functional_tensor(output); |
1782 | } else { |
1783 | output_ = output; |
1784 | } |
1785 | |
1786 | c10::optional<at::Tensor> grad_output_; |
1787 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
1788 | at::functionalization::impl::sync(grad_output); |
1789 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
1790 | } else { |
1791 | grad_output_ = grad_output; |
1792 | } |
1793 | |
1794 | c10::optional<at::Tensor> grad_hy_; |
1795 | if (at::functionalization::impl::isFunctionalTensor(grad_hy)) { |
1796 | at::functionalization::impl::sync(grad_hy); |
1797 | grad_hy_ = at::functionalization::impl::from_functional_tensor(grad_hy); |
1798 | } else { |
1799 | grad_hy_ = grad_hy; |
1800 | } |
1801 | |
1802 | c10::optional<at::Tensor> grad_cy_; |
1803 | if (at::functionalization::impl::isFunctionalTensor(grad_cy)) { |
1804 | at::functionalization::impl::sync(grad_cy); |
1805 | grad_cy_ = at::functionalization::impl::from_functional_tensor(grad_cy); |
1806 | } else { |
1807 | grad_cy_ = grad_cy; |
1808 | } |
1809 | |
1810 | c10::optional<at::Tensor> dropout_state_; |
1811 | if (at::functionalization::impl::isFunctionalTensor(dropout_state)) { |
1812 | at::functionalization::impl::sync(dropout_state); |
1813 | dropout_state_ = at::functionalization::impl::from_functional_tensor(dropout_state); |
1814 | } else { |
1815 | dropout_state_ = dropout_state; |
1816 | } |
1817 | |
1818 | at::Tensor reserve_; |
1819 | if (at::functionalization::impl::isFunctionalTensor(reserve)) { |
1820 | at::functionalization::impl::sync(reserve); |
1821 | reserve_ = at::functionalization::impl::from_functional_tensor(reserve); |
1822 | } else { |
1823 | reserve_ = reserve; |
1824 | } |
1825 | |
1826 | at::Tensor out0_; |
1827 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
1828 | at::functionalization::impl::sync(out0); |
1829 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
1830 | } else { |
1831 | out0_ = out0; |
1832 | } |
1833 | |
1834 | at::Tensor out1_; |
1835 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
1836 | at::functionalization::impl::sync(out1); |
1837 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
1838 | } else { |
1839 | out1_ = out1; |
1840 | } |
1841 | |
1842 | at::Tensor out2_; |
1843 | if (at::functionalization::impl::isFunctionalTensor(out2)) { |
1844 | at::functionalization::impl::sync(out2); |
1845 | out2_ = at::functionalization::impl::from_functional_tensor(out2); |
1846 | } else { |
1847 | out2_ = out2; |
1848 | } |
1849 | |
1850 | ::std::vector<at::Tensor> out3_; |
1851 | if (at::functionalization::impl::isFunctionalTensor(out3)) { |
1852 | at::functionalization::impl::sync(out3); |
1853 | out3_ = at::functionalization::impl::from_functional_tensor(out3); |
1854 | } else { |
1855 | out3_ = out3.vec(); |
1856 | } |
1857 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2) && at::functionalization::impl::isFunctionalTensor(out3))) { |
1858 | if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(weight_buf) || at::functionalization::impl::isFunctionalTensor(hx) || at::functionalization::impl::isFunctionalTensor(cx) || at::functionalization::impl::isFunctionalTensor(output) || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(grad_hy) || at::functionalization::impl::isFunctionalTensor(grad_cy) || at::functionalization::impl::isFunctionalTensor(dropout_state) || at::functionalization::impl::isFunctionalTensor(reserve))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
1860 | TORCH_INTERNAL_ASSERT(false, |
1861 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
1862 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
1863 | } else { |
1864 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
1865 | at::AutoDispatchSkipFunctionalize guard; |
1866 | at::_ops::_cudnn_rnn_backward_out::call(input_, weight_, weight_stride0, weight_buf_, hx_, cx_, output_, grad_output_, grad_hy_, grad_cy_, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_, reserve_, output_mask, out0_, out1_, out2_, out3_); |
1867 | ; |
1868 | } |
1869 | } else { |
1870 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> tmp_output; |
1871 | { |
1872 | at::AutoDispatchSkipFunctionalize guard; |
1873 | tmp_output = at::_ops::_cudnn_rnn_backward::call(input_, weight_, weight_stride0, weight_buf_, hx_, cx_, output_, grad_output_, grad_hy_, grad_cy_, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_, reserve_, output_mask); |
1874 | } |
1875 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
1876 | at::functionalization::impl::commit_update(out0); |
1877 | at::functionalization::impl::sync(out0); |
1878 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
1879 | at::functionalization::impl::commit_update(out1); |
1880 | at::functionalization::impl::sync(out1); |
1881 | at::functionalization::impl::replace_(out2, std::get<2>(tmp_output)); |
1882 | at::functionalization::impl::commit_update(out2); |
1883 | at::functionalization::impl::sync(out2); |
1884 | at::functionalization::impl::replace_(out3, std::get<3>(tmp_output)); |
1885 | at::functionalization::impl::commit_update(out3); |
1886 | at::functionalization::impl::sync(out3); |
1887 | |
1888 | } |
1889 | } |
1890 | |
1891 | ::std::tuple<at::Tensor &,at::Tensor &> _fused_dropout_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, c10::optional<at::Generator> generator, at::Tensor & out0, at::Tensor & out1) { |
1892 | if (false) { |
1893 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
// This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
1896 | auto self_meta = to_meta(self); |
1897 | auto out0_meta = to_meta(out0); |
1898 | auto out1_meta = to_meta(out1); |
1899 | at::AutoDispatchSkipFunctionalize func_guard; |
1900 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
1901 | at::_ops::_fused_dropout_out::call(self_meta, p, generator, out0_meta, out1_meta); |
1902 | } |
1903 | |
1904 | at::Tensor self_; |
1905 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
1906 | at::functionalization::impl::sync(self); |
1907 | self_ = at::functionalization::impl::from_functional_tensor(self); |
1908 | } else { |
1909 | self_ = self; |
1910 | } |
1911 | |
1912 | at::Tensor out0_; |
1913 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
1914 | at::functionalization::impl::sync(out0); |
1915 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
1916 | } else { |
1917 | out0_ = out0; |
1918 | } |
1919 | |
1920 | at::Tensor out1_; |
1921 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
1922 | at::functionalization::impl::sync(out1); |
1923 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
1924 | } else { |
1925 | out1_ = out1; |
1926 | } |
1927 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) { |
1928 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
1930 | TORCH_INTERNAL_ASSERT(false, |
1931 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
1932 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
1933 | } else { |
1934 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
1935 | at::AutoDispatchSkipFunctionalize guard; |
1936 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_fused_dropout_out::call(self_, p, generator, out0_, out1_); |
return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
1938 | } |
1939 | } else { |
1940 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
1941 | { |
1942 | at::AutoDispatchSkipFunctionalize guard; |
1943 | tmp_output = at::_ops::_fused_dropout::call(self_, p, generator); |
1944 | } |
1945 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
1946 | at::functionalization::impl::commit_update(out0); |
1947 | at::functionalization::impl::sync(out0); |
1948 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
1949 | at::functionalization::impl::commit_update(out1); |
1950 | at::functionalization::impl::sync(out1); |
1951 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
1952 | } |
1953 | } |
1954 | |
1955 | at::Tensor & conj_physical_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
1956 | if (false) { |
1957 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
// This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
1960 | auto self_meta = to_meta(self); |
1961 | auto out_meta = to_meta(out); |
1962 | at::AutoDispatchSkipFunctionalize func_guard; |
1963 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
1964 | at::_ops::conj_physical_out::call(self_meta, out_meta); |
1965 | } |
1966 | |
1967 | at::Tensor self_; |
1968 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
1969 | at::functionalization::impl::sync(self); |
1970 | self_ = at::functionalization::impl::from_functional_tensor(self); |
1971 | } else { |
1972 | self_ = self; |
1973 | } |
1974 | |
1975 | at::Tensor out_; |
1976 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
1977 | at::functionalization::impl::sync(out); |
1978 | out_ = at::functionalization::impl::from_functional_tensor(out); |
1979 | } else { |
1980 | out_ = out; |
1981 | } |
1982 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
1983 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
1985 | TORCH_INTERNAL_ASSERT(false, |
1986 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
1987 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
1988 | } else { |
1989 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
1990 | at::AutoDispatchSkipFunctionalize guard; |
1991 | at::Tensor tmp_output = at::_ops::conj_physical_out::call(self_, out_); |
return out;
1993 | } |
1994 | } else { |
1995 | at::Tensor tmp_output; |
1996 | { |
1997 | at::AutoDispatchSkipFunctionalize guard; |
1998 | tmp_output = at::_ops::conj_physical::call(self_); |
1999 | } |
2000 | at::functionalization::impl::replace_(out, tmp_output); |
2001 | at::functionalization::impl::commit_update(out); |
2002 | at::functionalization::impl::sync(out); |
2003 | return out; |
2004 | } |
2005 | } |
2006 | |
2007 | at::Tensor & conj_physical_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
2008 | if (true) { |
2009 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
// This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
2012 | auto self_meta = to_meta(self); |
2013 | at::AutoDispatchSkipFunctionalize func_guard; |
2014 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2015 | at::_ops::conj_physical_::call(self_meta); |
2016 | } |
2017 | |
2018 | at::Tensor self_; |
2019 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2020 | at::functionalization::impl::sync(self); |
2021 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2022 | } else { |
2023 | self_ = self; |
2024 | } |
2025 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
2026 | if ((false)) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
2028 | TORCH_INTERNAL_ASSERT(false, |
2029 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
2030 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
2031 | } else { |
2032 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2033 | at::AutoDispatchSkipFunctionalize guard; |
2034 | at::Tensor tmp_output = at::_ops::conj_physical_::call(self_); |
return self;
2036 | } |
2037 | } else { |
2038 | at::Tensor tmp_output; |
2039 | { |
2040 | at::AutoDispatchSkipFunctionalize guard; |
2041 | tmp_output = at::_ops::conj_physical::call(self_); |
2042 | } |
2043 | at::functionalization::impl::replace_(self, tmp_output); |
2044 | at::functionalization::impl::commit_update(self); |
2045 | at::functionalization::impl::sync(self); |
2046 | return self; |
2047 | } |
2048 | } |
2049 | |
2050 | at::Tensor & add_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) { |
2051 | if (false) { |
2052 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
// This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
2055 | auto self_meta = to_meta(self); |
2056 | auto other_meta = to_meta(other); |
2057 | auto out_meta = to_meta(out); |
2058 | at::AutoDispatchSkipFunctionalize func_guard; |
2059 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2060 | at::_ops::add_out::call(self_meta, other_meta, alpha, out_meta); |
2061 | } |
2062 | |
2063 | at::Tensor self_; |
2064 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2065 | at::functionalization::impl::sync(self); |
2066 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2067 | } else { |
2068 | self_ = self; |
2069 | } |
2070 | |
2071 | at::Tensor other_; |
2072 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
2073 | at::functionalization::impl::sync(other); |
2074 | other_ = at::functionalization::impl::from_functional_tensor(other); |
2075 | } else { |
2076 | other_ = other; |
2077 | } |
2078 | |
2079 | at::Tensor out_; |
2080 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2081 | at::functionalization::impl::sync(out); |
2082 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2083 | } else { |
2084 | out_ = out; |
2085 | } |
2086 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2087 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
2089 | TORCH_INTERNAL_ASSERT(false, |
2090 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
2091 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
2092 | } else { |
2093 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2094 | at::AutoDispatchSkipFunctionalize guard; |
2095 | at::Tensor tmp_output = at::_ops::add_out::call(self_, other_, alpha, out_); |
return out;
2097 | } |
2098 | } else { |
2099 | at::Tensor tmp_output; |
2100 | { |
2101 | at::AutoDispatchSkipFunctionalize guard; |
2102 | tmp_output = at::_ops::add_Tensor::call(self_, other_, alpha); |
2103 | } |
2104 | at::functionalization::impl::replace_(out, tmp_output); |
2105 | at::functionalization::impl::commit_update(out); |
2106 | at::functionalization::impl::sync(out); |
2107 | return out; |
2108 | } |
2109 | } |
2110 | |
2111 | at::Tensor & add__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { |
2112 | if (true) { |
2113 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2114 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
2115 | // (We can only do this for inplace ops today though, because they technicaly all support meta tensors). |
2116 | auto self_meta = to_meta(self); |
2117 | auto other_meta = to_meta(other); |
2118 | at::AutoDispatchSkipFunctionalize func_guard; |
2119 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2120 | at::_ops::add__Tensor::call(self_meta, other_meta, alpha); |
2121 | } |
2122 | |
2123 | at::Tensor self_; |
2124 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2125 | at::functionalization::impl::sync(self); |
2126 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2127 | } else { |
2128 | self_ = self; |
2129 | } |
2130 | |
2131 | at::Tensor other_; |
2132 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
2133 | at::functionalization::impl::sync(other); |
2134 | other_ = at::functionalization::impl::from_functional_tensor(other); |
2135 | } else { |
2136 | other_ = other; |
2137 | } |
2138 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
2139 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
2141 | TORCH_INTERNAL_ASSERT(false, |
2142 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
2143 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
2144 | } else { |
2145 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2146 | at::AutoDispatchSkipFunctionalize guard; |
2147 | at::Tensor tmp_output = at::_ops::add__Tensor::call(self_, other_, alpha); |
return self;
2149 | } |
2150 | } else { |
2151 | at::Tensor tmp_output; |
2152 | { |
2153 | at::AutoDispatchSkipFunctionalize guard; |
2154 | tmp_output = at::_ops::add_Tensor::call(self_, other_, alpha); |
2155 | } |
2156 | at::functionalization::impl::replace_(self, tmp_output); |
2157 | at::functionalization::impl::commit_update(self); |
2158 | at::functionalization::impl::sync(self); |
2159 | return self; |
2160 | } |
2161 | } |
2162 | |
2163 | at::Tensor & add_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) { |
2164 | if (false) { |
2165 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
// This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
2168 | auto self_meta = to_meta(self); |
2169 | auto out_meta = to_meta(out); |
2170 | at::AutoDispatchSkipFunctionalize func_guard; |
2171 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2172 | at::_ops::add_Scalar_out::call(self_meta, other, alpha, out_meta); |
2173 | } |
2174 | |
2175 | at::Tensor self_; |
2176 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2177 | at::functionalization::impl::sync(self); |
2178 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2179 | } else { |
2180 | self_ = self; |
2181 | } |
2182 | |
2183 | at::Tensor out_; |
2184 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2185 | at::functionalization::impl::sync(out); |
2186 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2187 | } else { |
2188 | out_ = out; |
2189 | } |
2190 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2191 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
2193 | TORCH_INTERNAL_ASSERT(false, |
2194 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
2195 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
2196 | } else { |
2197 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2198 | at::AutoDispatchSkipFunctionalize guard; |
2199 | at::Tensor tmp_output = at::_ops::add_Scalar_out::call(self_, other, alpha, out_); |
return out;
2201 | } |
2202 | } else { |
2203 | at::Tensor tmp_output; |
2204 | { |
2205 | at::AutoDispatchSkipFunctionalize guard; |
2206 | tmp_output = at::_ops::add_Scalar::call(self_, other, alpha); |
2207 | } |
2208 | at::functionalization::impl::replace_(out, tmp_output); |
2209 | at::functionalization::impl::commit_update(out); |
2210 | at::functionalization::impl::sync(out); |
2211 | return out; |
2212 | } |
2213 | } |
2214 | |
2215 | at::Tensor & add__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) { |
2216 | if (true) { |
2217 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
// This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
2220 | auto self_meta = to_meta(self); |
2221 | at::AutoDispatchSkipFunctionalize func_guard; |
2222 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2223 | at::_ops::add__Scalar::call(self_meta, other, alpha); |
2224 | } |
2225 | |
2226 | at::Tensor self_; |
2227 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2228 | at::functionalization::impl::sync(self); |
2229 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2230 | } else { |
2231 | self_ = self; |
2232 | } |
2233 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
2234 | if ((false)) { |
2235 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
2236 | TORCH_INTERNAL_ASSERT(false, |
2237 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
2238 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
2239 | } else { |
2240 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2241 | at::AutoDispatchSkipFunctionalize guard; |
2242 | at::Tensor tmp_output = at::_ops::add__Scalar::call(self_, other, alpha); |
2243 | return self; |
2244 | } |
2245 | } else { |
2246 | at::Tensor tmp_output; |
2247 | { |
2248 | at::AutoDispatchSkipFunctionalize guard; |
2249 | tmp_output = at::_ops::add_Scalar::call(self_, other, alpha); |
2250 | } |
2251 | at::functionalization::impl::replace_(self, tmp_output); |
2252 | at::functionalization::impl::commit_update(self); |
2253 | at::functionalization::impl::sync(self); |
2254 | return self; |
2255 | } |
2256 | } |
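
// In-place kernels such as add__Scalar above follow the same structure as the out=
// kernels, with two differences: the meta-tensor pre-check actually runs (the
// if (true) block), so shape errors raised by the mutable op are still surfaced, and
// the functional result is written back into self rather than into out.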
2257 | |
2258 | at::Tensor & addmv_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { |
2259 | if (false) { |
2260 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2261 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
2262 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
2263 | auto self_meta = to_meta(self); |
2264 | auto mat_meta = to_meta(mat); |
2265 | auto vec_meta = to_meta(vec); |
2266 | auto out_meta = to_meta(out); |
2267 | at::AutoDispatchSkipFunctionalize func_guard; |
2268 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2269 | at::_ops::addmv_out::call(self_meta, mat_meta, vec_meta, beta, alpha, out_meta); |
2270 | } |
2271 | |
2272 | at::Tensor self_; |
2273 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2274 | at::functionalization::impl::sync(self); |
2275 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2276 | } else { |
2277 | self_ = self; |
2278 | } |
2279 | |
2280 | at::Tensor mat_; |
2281 | if (at::functionalization::impl::isFunctionalTensor(mat)) { |
2282 | at::functionalization::impl::sync(mat); |
2283 | mat_ = at::functionalization::impl::from_functional_tensor(mat); |
2284 | } else { |
2285 | mat_ = mat; |
2286 | } |
2287 | |
2288 | at::Tensor vec_; |
2289 | if (at::functionalization::impl::isFunctionalTensor(vec)) { |
2290 | at::functionalization::impl::sync(vec); |
2291 | vec_ = at::functionalization::impl::from_functional_tensor(vec); |
2292 | } else { |
2293 | vec_ = vec; |
2294 | } |
2295 | |
2296 | at::Tensor out_; |
2297 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2298 | at::functionalization::impl::sync(out); |
2299 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2300 | } else { |
2301 | out_ = out; |
2302 | } |
2303 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2304 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mat) || at::functionalization::impl::isFunctionalTensor(vec))) { |
2305 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
2306 | TORCH_INTERNAL_ASSERT(false, |
2307 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
2308 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
2309 | } else { |
2310 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2311 | at::AutoDispatchSkipFunctionalize guard; |
2312 | at::Tensor tmp_output = at::_ops::addmv_out::call(self_, mat_, vec_, beta, alpha, out_); |
2313 | return out; |
2314 | } |
2315 | } else { |
2316 | at::Tensor tmp_output; |
2317 | { |
2318 | at::AutoDispatchSkipFunctionalize guard; |
2319 | tmp_output = at::_ops::addmv::call(self_, mat_, vec_, beta, alpha); |
2320 | } |
2321 | at::functionalization::impl::replace_(out, tmp_output); |
2322 | at::functionalization::impl::commit_update(out); |
2323 | at::functionalization::impl::sync(out); |
2324 | return out; |
2325 | } |
2326 | } |
2327 | |
2328 | at::Tensor & addmv_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) { |
2329 | if (true) { |
2330 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2331 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
2332 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
2333 | auto self_meta = to_meta(self); |
2334 | auto mat_meta = to_meta(mat); |
2335 | auto vec_meta = to_meta(vec); |
2336 | at::AutoDispatchSkipFunctionalize func_guard; |
2337 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2338 | at::_ops::addmv_::call(self_meta, mat_meta, vec_meta, beta, alpha); |
2339 | } |
2340 | |
2341 | at::Tensor self_; |
2342 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2343 | at::functionalization::impl::sync(self); |
2344 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2345 | } else { |
2346 | self_ = self; |
2347 | } |
2348 | |
2349 | at::Tensor mat_; |
2350 | if (at::functionalization::impl::isFunctionalTensor(mat)) { |
2351 | at::functionalization::impl::sync(mat); |
2352 | mat_ = at::functionalization::impl::from_functional_tensor(mat); |
2353 | } else { |
2354 | mat_ = mat; |
2355 | } |
2356 | |
2357 | at::Tensor vec_; |
2358 | if (at::functionalization::impl::isFunctionalTensor(vec)) { |
2359 | at::functionalization::impl::sync(vec); |
2360 | vec_ = at::functionalization::impl::from_functional_tensor(vec); |
2361 | } else { |
2362 | vec_ = vec; |
2363 | } |
2364 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
2365 | if ((false || at::functionalization::impl::isFunctionalTensor(mat) || at::functionalization::impl::isFunctionalTensor(vec))) { |
2366 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
2367 | TORCH_INTERNAL_ASSERT(false, |
2368 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
2369 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
2370 | } else { |
2371 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2372 | at::AutoDispatchSkipFunctionalize guard; |
2373 | at::Tensor tmp_output = at::_ops::addmv_::call(self_, mat_, vec_, beta, alpha); |
2374 | return self; |
2375 | } |
2376 | } else { |
2377 | at::Tensor tmp_output; |
2378 | { |
2379 | at::AutoDispatchSkipFunctionalize guard; |
2380 | tmp_output = at::_ops::addmv::call(self_, mat_, vec_, beta, alpha); |
2381 | } |
2382 | at::functionalization::impl::replace_(self, tmp_output); |
2383 | at::functionalization::impl::commit_update(self); |
2384 | at::functionalization::impl::sync(self); |
2385 | return self; |
2386 | } |
2387 | } |
2388 | |
2389 | at::Tensor & addr_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { |
2390 | if (false) { |
2391 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2392 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
2393 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
2394 | auto self_meta = to_meta(self); |
2395 | auto vec1_meta = to_meta(vec1); |
2396 | auto vec2_meta = to_meta(vec2); |
2397 | auto out_meta = to_meta(out); |
2398 | at::AutoDispatchSkipFunctionalize func_guard; |
2399 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2400 | at::_ops::addr_out::call(self_meta, vec1_meta, vec2_meta, beta, alpha, out_meta); |
2401 | } |
2402 | |
2403 | at::Tensor self_; |
2404 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2405 | at::functionalization::impl::sync(self); |
2406 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2407 | } else { |
2408 | self_ = self; |
2409 | } |
2410 | |
2411 | at::Tensor vec1_; |
2412 | if (at::functionalization::impl::isFunctionalTensor(vec1)) { |
2413 | at::functionalization::impl::sync(vec1); |
2414 | vec1_ = at::functionalization::impl::from_functional_tensor(vec1); |
2415 | } else { |
2416 | vec1_ = vec1; |
2417 | } |
2418 | |
2419 | at::Tensor vec2_; |
2420 | if (at::functionalization::impl::isFunctionalTensor(vec2)) { |
2421 | at::functionalization::impl::sync(vec2); |
2422 | vec2_ = at::functionalization::impl::from_functional_tensor(vec2); |
2423 | } else { |
2424 | vec2_ = vec2; |
2425 | } |
2426 | |
2427 | at::Tensor out_; |
2428 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2429 | at::functionalization::impl::sync(out); |
2430 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2431 | } else { |
2432 | out_ = out; |
2433 | } |
2434 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2435 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(vec1) || at::functionalization::impl::isFunctionalTensor(vec2))) { |
2436 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
2437 | TORCH_INTERNAL_ASSERT(false, |
2438 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
2439 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
2440 | } else { |
2441 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2442 | at::AutoDispatchSkipFunctionalize guard; |
2443 | at::Tensor tmp_output = at::_ops::addr_out::call(self_, vec1_, vec2_, beta, alpha, out_); |
2444 | return out; |
2445 | } |
2446 | } else { |
2447 | at::Tensor tmp_output; |
2448 | { |
2449 | at::AutoDispatchSkipFunctionalize guard; |
2450 | tmp_output = at::_ops::addr::call(self_, vec1_, vec2_, beta, alpha); |
2451 | } |
2452 | at::functionalization::impl::replace_(out, tmp_output); |
2453 | at::functionalization::impl::commit_update(out); |
2454 | at::functionalization::impl::sync(out); |
2455 | return out; |
2456 | } |
2457 | } |
2458 | |
2459 | at::Tensor & addr_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) { |
2460 | if (true) { |
2461 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2462 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
2463 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
2464 | auto self_meta = to_meta(self); |
2465 | auto vec1_meta = to_meta(vec1); |
2466 | auto vec2_meta = to_meta(vec2); |
2467 | at::AutoDispatchSkipFunctionalize func_guard; |
2468 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2469 | at::_ops::addr_::call(self_meta, vec1_meta, vec2_meta, beta, alpha); |
2470 | } |
2471 | |
2472 | at::Tensor self_; |
2473 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2474 | at::functionalization::impl::sync(self); |
2475 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2476 | } else { |
2477 | self_ = self; |
2478 | } |
2479 | |
2480 | at::Tensor vec1_; |
2481 | if (at::functionalization::impl::isFunctionalTensor(vec1)) { |
2482 | at::functionalization::impl::sync(vec1); |
2483 | vec1_ = at::functionalization::impl::from_functional_tensor(vec1); |
2484 | } else { |
2485 | vec1_ = vec1; |
2486 | } |
2487 | |
2488 | at::Tensor vec2_; |
2489 | if (at::functionalization::impl::isFunctionalTensor(vec2)) { |
2490 | at::functionalization::impl::sync(vec2); |
2491 | vec2_ = at::functionalization::impl::from_functional_tensor(vec2); |
2492 | } else { |
2493 | vec2_ = vec2; |
2494 | } |
2495 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
2496 | if ((false || at::functionalization::impl::isFunctionalTensor(vec1) || at::functionalization::impl::isFunctionalTensor(vec2))) { |
2497 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
2498 | TORCH_INTERNAL_ASSERT(false, |
2499 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
2500 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
2501 | } else { |
2502 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2503 | at::AutoDispatchSkipFunctionalize guard; |
2504 | at::Tensor tmp_output = at::_ops::addr_::call(self_, vec1_, vec2_, beta, alpha); |
2505 | return self; |
2506 | } |
2507 | } else { |
2508 | at::Tensor tmp_output; |
2509 | { |
2510 | at::AutoDispatchSkipFunctionalize guard; |
2511 | tmp_output = at::_ops::addr::call(self_, vec1_, vec2_, beta, alpha); |
2512 | } |
2513 | at::functionalization::impl::replace_(self, tmp_output); |
2514 | at::functionalization::impl::commit_update(self); |
2515 | at::functionalization::impl::sync(self); |
2516 | return self; |
2517 | } |
2518 | } |
2519 | |
2520 | at::Tensor & all_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out) { |
2521 | if (false) { |
2522 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2523 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
2524 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
2525 | auto self_meta = to_meta(self); |
2526 | auto out_meta = to_meta(out); |
2527 | at::AutoDispatchSkipFunctionalize func_guard; |
2528 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2529 | at::_ops::all_out::call(self_meta, dim, keepdim, out_meta); |
2530 | } |
2531 | |
2532 | at::Tensor self_; |
2533 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2534 | at::functionalization::impl::sync(self); |
2535 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2536 | } else { |
2537 | self_ = self; |
2538 | } |
2539 | |
2540 | at::Tensor out_; |
2541 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2542 | at::functionalization::impl::sync(out); |
2543 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2544 | } else { |
2545 | out_ = out; |
2546 | } |
2547 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2548 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
2549 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
2550 | TORCH_INTERNAL_ASSERT(false, |
2551 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
2552 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
2553 | } else { |
2554 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2555 | at::AutoDispatchSkipFunctionalize guard; |
2556 | at::Tensor tmp_output = at::_ops::all_out::call(self_, dim, keepdim, out_); |
2557 | return out; |
2558 | } |
2559 | } else { |
2560 | at::Tensor tmp_output; |
2561 | { |
2562 | at::AutoDispatchSkipFunctionalize guard; |
2563 | tmp_output = at::_ops::all_dim::call(self_, dim, keepdim); |
2564 | } |
2565 | at::functionalization::impl::replace_(out, tmp_output); |
2566 | at::functionalization::impl::commit_update(out); |
2567 | at::functionalization::impl::sync(out); |
2568 | return out; |
2569 | } |
2570 | } |
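
// The functional counterpart of an out= overload does not always share its name:
// all.out above redispatches to at::_ops::all_dim, and all.dimname_out below
// redispatches to at::_ops::all_dimname.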
2571 | |
2572 | at::Tensor & all_out_dimname_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out) { |
2573 | if (false) { |
2574 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2575 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
2576 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
2577 | auto self_meta = to_meta(self); |
2578 | auto out_meta = to_meta(out); |
2579 | at::AutoDispatchSkipFunctionalize func_guard; |
2580 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2581 | at::_ops::all_dimname_out::call(self_meta, dim, keepdim, out_meta); |
2582 | } |
2583 | |
2584 | at::Tensor self_; |
2585 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2586 | at::functionalization::impl::sync(self); |
2587 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2588 | } else { |
2589 | self_ = self; |
2590 | } |
2591 | |
2592 | at::Tensor out_; |
2593 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2594 | at::functionalization::impl::sync(out); |
2595 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2596 | } else { |
2597 | out_ = out; |
2598 | } |
2599 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2600 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
2601 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
2602 | TORCH_INTERNAL_ASSERT(false, |
2603 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
2604 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
2605 | } else { |
2606 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2607 | at::AutoDispatchSkipFunctionalize guard; |
2608 | at::Tensor tmp_output = at::_ops::all_dimname_out::call(self_, dim, keepdim, out_); |
2609 | return out; |
2610 | } |
2611 | } else { |
2612 | at::Tensor tmp_output; |
2613 | { |
2614 | at::AutoDispatchSkipFunctionalize guard; |
2615 | tmp_output = at::_ops::all_dimname::call(self_, dim, keepdim); |
2616 | } |
2617 | at::functionalization::impl::replace_(out, tmp_output); |
2618 | at::functionalization::impl::commit_update(out); |
2619 | at::functionalization::impl::sync(out); |
2620 | return out; |
2621 | } |
2622 | } |
2623 | |
2624 | at::Tensor & argmax_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, at::Tensor & out) { |
2625 | if (false) { |
2626 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2627 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
2628 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
2629 | auto self_meta = to_meta(self); |
2630 | auto out_meta = to_meta(out); |
2631 | at::AutoDispatchSkipFunctionalize func_guard; |
2632 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2633 | at::_ops::argmax_out::call(self_meta, dim, keepdim, out_meta); |
2634 | } |
2635 | |
2636 | at::Tensor self_; |
2637 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2638 | at::functionalization::impl::sync(self); |
2639 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2640 | } else { |
2641 | self_ = self; |
2642 | } |
2643 | |
2644 | at::Tensor out_; |
2645 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2646 | at::functionalization::impl::sync(out); |
2647 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2648 | } else { |
2649 | out_ = out; |
2650 | } |
2651 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2652 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
2653 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
2654 | TORCH_INTERNAL_ASSERT(false, |
2655 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
2656 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
2657 | } else { |
2658 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2659 | at::AutoDispatchSkipFunctionalize guard; |
2660 | at::Tensor tmp_output = at::_ops::argmax_out::call(self_, dim, keepdim, out_); |
2661 | return out; |
2662 | } |
2663 | } else { |
2664 | at::Tensor tmp_output; |
2665 | { |
2666 | at::AutoDispatchSkipFunctionalize guard; |
2667 | tmp_output = at::_ops::argmax::call(self_, dim, keepdim); |
2668 | } |
2669 | at::functionalization::impl::replace_(out, tmp_output); |
2670 | at::functionalization::impl::commit_update(out); |
2671 | at::functionalization::impl::sync(out); |
2672 | return out; |
2673 | } |
2674 | } |
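
// Only tensor arguments need the sync/unwrap treatment; non-tensor arguments such as
// the c10::optional<int64_t> dim and bool keepdim of argmax.out are forwarded to the
// redispatched call unchanged.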
2675 | |
2676 | at::Tensor & atan_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
2677 | if (false) { |
2678 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2679 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
2680 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
2681 | auto self_meta = to_meta(self); |
2682 | auto out_meta = to_meta(out); |
2683 | at::AutoDispatchSkipFunctionalize func_guard; |
2684 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2685 | at::_ops::atan_out::call(self_meta, out_meta); |
2686 | } |
2687 | |
2688 | at::Tensor self_; |
2689 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2690 | at::functionalization::impl::sync(self); |
2691 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2692 | } else { |
2693 | self_ = self; |
2694 | } |
2695 | |
2696 | at::Tensor out_; |
2697 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2698 | at::functionalization::impl::sync(out); |
2699 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2700 | } else { |
2701 | out_ = out; |
2702 | } |
2703 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2704 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
2705 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
2706 | TORCH_INTERNAL_ASSERT(false, |
2707 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
2708 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
2709 | } else { |
2710 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2711 | at::AutoDispatchSkipFunctionalize guard; |
2712 | at::Tensor tmp_output = at::_ops::atan_out::call(self_, out_); |
2713 | return out; |
2714 | } |
2715 | } else { |
2716 | at::Tensor tmp_output; |
2717 | { |
2718 | at::AutoDispatchSkipFunctionalize guard; |
2719 | tmp_output = at::_ops::atan::call(self_); |
2720 | } |
2721 | at::functionalization::impl::replace_(out, tmp_output); |
2722 | at::functionalization::impl::commit_update(out); |
2723 | at::functionalization::impl::sync(out); |
2724 | return out; |
2725 | } |
2726 | } |
2727 | |
2728 | at::Tensor & atan_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
2729 | if (true) { |
2730 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2731 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
2732 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
2733 | auto self_meta = to_meta(self); |
2734 | at::AutoDispatchSkipFunctionalize func_guard; |
2735 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2736 | at::_ops::atan_::call(self_meta); |
2737 | } |
2738 | |
2739 | at::Tensor self_; |
2740 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2741 | at::functionalization::impl::sync(self); |
2742 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2743 | } else { |
2744 | self_ = self; |
2745 | } |
2746 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
2747 | if ((false)) { |
2748 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
2749 | TORCH_INTERNAL_ASSERT(false, |
2750 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
2751 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
2752 | } else { |
2753 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2754 | at::AutoDispatchSkipFunctionalize guard; |
2755 | at::Tensor tmp_output = at::_ops::atan_::call(self_); |
2756 | return self; |
2757 | } |
2758 | } else { |
2759 | at::Tensor tmp_output; |
2760 | { |
2761 | at::AutoDispatchSkipFunctionalize guard; |
2762 | tmp_output = at::_ops::atan::call(self_); |
2763 | } |
2764 | at::functionalization::impl::replace_(self, tmp_output); |
2765 | at::functionalization::impl::commit_update(self); |
2766 | at::functionalization::impl::sync(self); |
2767 | return self; |
2768 | } |
2769 | } |
2770 | |
2771 | at::Tensor & bartlett_window_out_out(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out) { |
2772 | if (false) { |
2773 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2774 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
2775 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
2776 | auto out_meta = to_meta(out); |
2777 | at::AutoDispatchSkipFunctionalize func_guard; |
2778 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2779 | at::_ops::bartlett_window_out::call(window_length, out_meta); |
2780 | } |
2781 | |
2782 | at::Tensor out_; |
2783 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2784 | at::functionalization::impl::sync(out); |
2785 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2786 | } else { |
2787 | out_ = out; |
2788 | } |
2789 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2790 | if ((false)) { |
2791 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
2792 | TORCH_INTERNAL_ASSERT(false, |
2793 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
2794 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
2795 | } else { |
2796 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2797 | at::AutoDispatchSkipFunctionalize guard; |
2798 | at::Tensor tmp_output = at::_ops::bartlett_window_out::call(window_length, out_); |
2799 | return out; |
2800 | } |
2801 | } else { |
2802 | at::Tensor tmp_output; |
2803 | { |
2804 | at::AutoDispatchSkipFunctionalize guard; |
2805 | tmp_output = at::_ops::bartlett_window::call(window_length, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
2806 | } |
2807 | at::functionalization::impl::replace_(out, tmp_output); |
2808 | at::functionalization::impl::commit_update(out); |
2809 | at::functionalization::impl::sync(out); |
2810 | return out; |
2811 | } |
2812 | } |
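
// Factory-style out= ops such as bartlett_window.out have no tensor inputs to unwrap.
// When out is a functional tensor, the functional factory op is called with the
// dtype/layout/device taken from the unwrapped out_ (pin_memory is left as
// c10::nullopt) and the result is committed back into the wrapper.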
2813 | |
2814 | at::Tensor & bartlett_window_out_periodic_out(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out) { |
2815 | if (false) { |
2816 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2817 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
2818 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
2819 | auto out_meta = to_meta(out); |
2820 | at::AutoDispatchSkipFunctionalize func_guard; |
2821 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2822 | at::_ops::bartlett_window_periodic_out::call(window_length, periodic, out_meta); |
2823 | } |
2824 | |
2825 | at::Tensor out_; |
2826 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2827 | at::functionalization::impl::sync(out); |
2828 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2829 | } else { |
2830 | out_ = out; |
2831 | } |
2832 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2833 | if ((false)) { |
2834 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
2835 | TORCH_INTERNAL_ASSERT(false, |
2836 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
2837 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
2838 | } else { |
2839 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2840 | at::AutoDispatchSkipFunctionalize guard; |
2841 | at::Tensor tmp_output = at::_ops::bartlett_window_periodic_out::call(window_length, periodic, out_); |
2842 | return out; |
2843 | } |
2844 | } else { |
2845 | at::Tensor tmp_output; |
2846 | { |
2847 | at::AutoDispatchSkipFunctionalize guard; |
2848 | tmp_output = at::_ops::bartlett_window_periodic::call(window_length, periodic, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
2849 | } |
2850 | at::functionalization::impl::replace_(out, tmp_output); |
2851 | at::functionalization::impl::commit_update(out); |
2852 | at::functionalization::impl::sync(out); |
2853 | return out; |
2854 | } |
2855 | } |
2856 | |
2857 | at::Tensor & binary_cross_entropy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out) { |
2858 | if (false) { |
2859 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2860 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
2861 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
2862 | auto self_meta = to_meta(self); |
2863 | auto target_meta = to_meta(target); |
2864 | auto weight_meta = to_meta(weight); |
2865 | auto out_meta = to_meta(out); |
2866 | at::AutoDispatchSkipFunctionalize func_guard; |
2867 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2868 | at::_ops::binary_cross_entropy_out::call(self_meta, target_meta, weight_meta, reduction, out_meta); |
2869 | } |
2870 | |
2871 | at::Tensor self_; |
2872 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2873 | at::functionalization::impl::sync(self); |
2874 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2875 | } else { |
2876 | self_ = self; |
2877 | } |
2878 | |
2879 | at::Tensor target_; |
2880 | if (at::functionalization::impl::isFunctionalTensor(target)) { |
2881 | at::functionalization::impl::sync(target); |
2882 | target_ = at::functionalization::impl::from_functional_tensor(target); |
2883 | } else { |
2884 | target_ = target; |
2885 | } |
2886 | |
2887 | c10::optional<at::Tensor> weight_; |
2888 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
2889 | at::functionalization::impl::sync(weight); |
2890 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
2891 | } else { |
2892 | weight_ = weight; |
2893 | } |
2894 | |
2895 | at::Tensor out_; |
2896 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2897 | at::functionalization::impl::sync(out); |
2898 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2899 | } else { |
2900 | out_ = out; |
2901 | } |
2902 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2903 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target) || at::functionalization::impl::isFunctionalTensor(weight))) { |
2904 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
2905 | TORCH_INTERNAL_ASSERT(false, |
2906 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
2907 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
2908 | } else { |
2909 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2910 | at::AutoDispatchSkipFunctionalize guard; |
2911 | at::Tensor tmp_output = at::_ops::binary_cross_entropy_out::call(self_, target_, weight_, reduction, out_); |
2912 | return out; |
2913 | } |
2914 | } else { |
2915 | at::Tensor tmp_output; |
2916 | { |
2917 | at::AutoDispatchSkipFunctionalize guard; |
2918 | tmp_output = at::_ops::binary_cross_entropy::call(self_, target_, weight_, reduction); |
2919 | } |
2920 | at::functionalization::impl::replace_(out, tmp_output); |
2921 | at::functionalization::impl::commit_update(out); |
2922 | at::functionalization::impl::sync(out); |
2923 | return out; |
2924 | } |
2925 | } |
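
// Optional tensor arguments (here the weight of binary_cross_entropy.out) are carried
// through c10::optional<at::Tensor>: they are synced and unwrapped exactly like
// required tensors, and they participate in the check that refuses to mutate a
// non-functional out with functional inputs.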
2926 | |
2927 | at::Tensor & bitwise_not_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
2928 | if (false) { |
2929 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2930 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
2931 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
2932 | auto self_meta = to_meta(self); |
2933 | auto out_meta = to_meta(out); |
2934 | at::AutoDispatchSkipFunctionalize func_guard; |
2935 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2936 | at::_ops::bitwise_not_out::call(self_meta, out_meta); |
2937 | } |
2938 | |
2939 | at::Tensor self_; |
2940 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2941 | at::functionalization::impl::sync(self); |
2942 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2943 | } else { |
2944 | self_ = self; |
2945 | } |
2946 | |
2947 | at::Tensor out_; |
2948 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2949 | at::functionalization::impl::sync(out); |
2950 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2951 | } else { |
2952 | out_ = out; |
2953 | } |
2954 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2955 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
2956 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
2957 | TORCH_INTERNAL_ASSERT(false, |
2958 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
2959 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
2960 | } else { |
2961 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2962 | at::AutoDispatchSkipFunctionalize guard; |
2963 | at::Tensor tmp_output = at::_ops::bitwise_not_out::call(self_, out_); |
2964 | return out; |
2965 | } |
2966 | } else { |
2967 | at::Tensor tmp_output; |
2968 | { |
2969 | at::AutoDispatchSkipFunctionalize guard; |
2970 | tmp_output = at::_ops::bitwise_not::call(self_); |
2971 | } |
2972 | at::functionalization::impl::replace_(out, tmp_output); |
2973 | at::functionalization::impl::commit_update(out); |
2974 | at::functionalization::impl::sync(out); |
2975 | return out; |
2976 | } |
2977 | } |
2978 | |
2979 | at::Tensor & bitwise_not_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
2980 | if (true) { |
2981 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2982 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
2983 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
2984 | auto self_meta = to_meta(self); |
2985 | at::AutoDispatchSkipFunctionalize func_guard; |
2986 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2987 | at::_ops::bitwise_not_::call(self_meta); |
2988 | } |
2989 | |
2990 | at::Tensor self_; |
2991 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2992 | at::functionalization::impl::sync(self); |
2993 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2994 | } else { |
2995 | self_ = self; |
2996 | } |
2997 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
2998 | if ((false)) { |
2999 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
3000 | TORCH_INTERNAL_ASSERT(false, |
3001 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3002 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3003 | } else { |
3004 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3005 | at::AutoDispatchSkipFunctionalize guard; |
3006 | at::Tensor tmp_output = at::_ops::bitwise_not_::call(self_); |
3007 | return self; |
3008 | } |
3009 | } else { |
3010 | at::Tensor tmp_output; |
3011 | { |
3012 | at::AutoDispatchSkipFunctionalize guard; |
3013 | tmp_output = at::_ops::bitwise_not::call(self_); |
3014 | } |
3015 | at::functionalization::impl::replace_(self, tmp_output); |
3016 | at::functionalization::impl::commit_update(self); |
3017 | at::functionalization::impl::sync(self); |
3018 | return self; |
3019 | } |
3020 | } |
3021 | |
3022 | at::Tensor & logical_xor_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
3023 | if (false) { |
3024 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3025 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
3026 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
3027 | auto self_meta = to_meta(self); |
3028 | auto other_meta = to_meta(other); |
3029 | auto out_meta = to_meta(out); |
3030 | at::AutoDispatchSkipFunctionalize func_guard; |
3031 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3032 | at::_ops::logical_xor_out::call(self_meta, other_meta, out_meta); |
3033 | } |
3034 | |
3035 | at::Tensor self_; |
3036 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3037 | at::functionalization::impl::sync(self); |
3038 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3039 | } else { |
3040 | self_ = self; |
3041 | } |
3042 | |
3043 | at::Tensor other_; |
3044 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
3045 | at::functionalization::impl::sync(other); |
3046 | other_ = at::functionalization::impl::from_functional_tensor(other); |
3047 | } else { |
3048 | other_ = other; |
3049 | } |
3050 | |
3051 | at::Tensor out_; |
3052 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
3053 | at::functionalization::impl::sync(out); |
3054 | out_ = at::functionalization::impl::from_functional_tensor(out); |
3055 | } else { |
3056 | out_ = out; |
3057 | } |
3058 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
3059 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
3060 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
3061 | TORCH_INTERNAL_ASSERT(false, |
3062 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3063 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3064 | } else { |
3065 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3066 | at::AutoDispatchSkipFunctionalize guard; |
3067 | at::Tensor tmp_output = at::_ops::logical_xor_out::call(self_, other_, out_); |
3068 | return out; |
3069 | } |
3070 | } else { |
3071 | at::Tensor tmp_output; |
3072 | { |
3073 | at::AutoDispatchSkipFunctionalize guard; |
3074 | tmp_output = at::_ops::logical_xor::call(self_, other_); |
3075 | } |
3076 | at::functionalization::impl::replace_(out, tmp_output); |
3077 | at::functionalization::impl::commit_update(out); |
3078 | at::functionalization::impl::sync(out); |
3079 | return out; |
3080 | } |
3081 | } |
3082 | |
3083 | at::Tensor & logical_xor_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { |
3084 | if (true) { |
3085 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3086 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
3087 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
3088 | auto self_meta = to_meta(self); |
3089 | auto other_meta = to_meta(other); |
3090 | at::AutoDispatchSkipFunctionalize func_guard; |
3091 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3092 | at::_ops::logical_xor_::call(self_meta, other_meta); |
3093 | } |
3094 | |
3095 | at::Tensor self_; |
3096 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3097 | at::functionalization::impl::sync(self); |
3098 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3099 | } else { |
3100 | self_ = self; |
3101 | } |
3102 | |
3103 | at::Tensor other_; |
3104 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
3105 | at::functionalization::impl::sync(other); |
3106 | other_ = at::functionalization::impl::from_functional_tensor(other); |
3107 | } else { |
3108 | other_ = other; |
3109 | } |
3110 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
3111 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
3112 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
3113 | TORCH_INTERNAL_ASSERT(false, |
3114 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3115 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3116 | } else { |
3117 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3118 | at::AutoDispatchSkipFunctionalize guard; |
3119 | at::Tensor tmp_output = at::_ops::logical_xor_::call(self_, other_); |
3120 | return self; |
3121 | } |
3122 | } else { |
3123 | at::Tensor tmp_output; |
3124 | { |
3125 | at::AutoDispatchSkipFunctionalize guard; |
3126 | tmp_output = at::_ops::logical_xor::call(self_, other_); |
3127 | } |
3128 | at::functionalization::impl::replace_(self, tmp_output); |
3129 | at::functionalization::impl::commit_update(self); |
3130 | at::functionalization::impl::sync(self); |
3131 | return self; |
3132 | } |
3133 | } |
3134 | |
3135 | at::Tensor & blackman_window_out_out(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out) { |
3136 | if (false) { |
3137 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3138 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
3139 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
3140 | auto out_meta = to_meta(out); |
3141 | at::AutoDispatchSkipFunctionalize func_guard; |
3142 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3143 | at::_ops::blackman_window_out::call(window_length, out_meta); |
3144 | } |
3145 | |
3146 | at::Tensor out_; |
3147 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
3148 | at::functionalization::impl::sync(out); |
3149 | out_ = at::functionalization::impl::from_functional_tensor(out); |
3150 | } else { |
3151 | out_ = out; |
3152 | } |
3153 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
3154 | if ((false)) { |
3155 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
3156 | TORCH_INTERNAL_ASSERT(false, |
3157 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3158 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3159 | } else { |
3160 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3161 | at::AutoDispatchSkipFunctionalize guard; |
3162 | at::Tensor tmp_output = at::_ops::blackman_window_out::call(window_length, out_); |
3163 | return out; |
3164 | } |
3165 | } else { |
3166 | at::Tensor tmp_output; |
3167 | { |
3168 | at::AutoDispatchSkipFunctionalize guard; |
3169 | tmp_output = at::_ops::blackman_window::call(window_length, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
3170 | } |
3171 | at::functionalization::impl::replace_(out, tmp_output); |
3172 | at::functionalization::impl::commit_update(out); |
3173 | at::functionalization::impl::sync(out); |
3174 | return out; |
3175 | } |
3176 | } |
3177 | |
3178 | at::Tensor & blackman_window_out_periodic_out(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out) { |
3179 | if (false) { |
3180 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3181 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
3182 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
3183 | auto out_meta = to_meta(out); |
3184 | at::AutoDispatchSkipFunctionalize func_guard; |
3185 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3186 | at::_ops::blackman_window_periodic_out::call(window_length, periodic, out_meta); |
3187 | } |
3188 | |
3189 | at::Tensor out_; |
3190 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
3191 | at::functionalization::impl::sync(out); |
3192 | out_ = at::functionalization::impl::from_functional_tensor(out); |
3193 | } else { |
3194 | out_ = out; |
3195 | } |
3196 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
3197 | if ((false)) { |
3198 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
3199 | TORCH_INTERNAL_ASSERT(false, |
3200 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3201 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3202 | } else { |
3203 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3204 | at::AutoDispatchSkipFunctionalize guard; |
3205 | at::Tensor tmp_output = at::_ops::blackman_window_periodic_out::call(window_length, periodic, out_); |
3206 | return out; |
3207 | } |
3208 | } else { |
3209 | at::Tensor tmp_output; |
3210 | { |
3211 | at::AutoDispatchSkipFunctionalize guard; |
3212 | tmp_output = at::_ops::blackman_window_periodic::call(window_length, periodic, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
3213 | } |
3214 | at::functionalization::impl::replace_(out, tmp_output); |
3215 | at::functionalization::impl::commit_update(out); |
3216 | at::functionalization::impl::sync(out); |
3217 | return out; |
3218 | } |
3219 | } |
3220 | |
3221 | at::Tensor & clamp_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max, at::Tensor & out) { |
3222 | if (false) { |
3223 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3224 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
3225 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
3226 | auto self_meta = to_meta(self); |
3227 | auto out_meta = to_meta(out); |
3228 | at::AutoDispatchSkipFunctionalize func_guard; |
3229 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3230 | at::_ops::clamp_out::call(self_meta, min, max, out_meta); |
3231 | } |
3232 | |
3233 | at::Tensor self_; |
3234 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3235 | at::functionalization::impl::sync(self); |
3236 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3237 | } else { |
3238 | self_ = self; |
3239 | } |
3240 | |
3241 | at::Tensor out_; |
3242 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
3243 | at::functionalization::impl::sync(out); |
3244 | out_ = at::functionalization::impl::from_functional_tensor(out); |
3245 | } else { |
3246 | out_ = out; |
3247 | } |
3248 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
3249 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
3250 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
3251 | TORCH_INTERNAL_ASSERT(false, |
3252 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3253 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3254 | } else { |
3255 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3256 | at::AutoDispatchSkipFunctionalize guard; |
3257 | at::Tensor tmp_output = at::_ops::clamp_out::call(self_, min, max, out_); |
3258 | return out; |
3259 | } |
3260 | } else { |
3261 | at::Tensor tmp_output; |
3262 | { |
3263 | at::AutoDispatchSkipFunctionalize guard; |
3264 | tmp_output = at::_ops::clamp::call(self_, min, max); |
3265 | } |
3266 | at::functionalization::impl::replace_(out, tmp_output); |
3267 | at::functionalization::impl::commit_update(out); |
3268 | at::functionalization::impl::sync(out); |
3269 | return out; |
3270 | } |
3271 | } |
3272 | |
3273 | at::Tensor & clamp_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) { |
3274 | if (true) { |
3275 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3276 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
3277 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
3278 | auto self_meta = to_meta(self); |
3279 | at::AutoDispatchSkipFunctionalize func_guard; |
3280 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3281 | at::_ops::clamp_::call(self_meta, min, max); |
3282 | } |
3283 | |
3284 | at::Tensor self_; |
3285 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3286 | at::functionalization::impl::sync(self); |
3287 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3288 | } else { |
3289 | self_ = self; |
3290 | } |
3291 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
3292 | if ((false)) { |
3293 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
3294 | TORCH_INTERNAL_ASSERT(false, |
3295 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3296 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3297 | } else { |
3298 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3299 | at::AutoDispatchSkipFunctionalize guard; |
3300 | at::Tensor tmp_output = at::_ops::clamp_::call(self_, min, max); |
3301 | return self; |
3302 | } |
3303 | } else { |
3304 | at::Tensor tmp_output; |
3305 | { |
3306 | at::AutoDispatchSkipFunctionalize guard; |
3307 | tmp_output = at::_ops::clamp::call(self_, min, max); |
3308 | } |
3309 | at::functionalization::impl::replace_(self, tmp_output); |
3310 | at::functionalization::impl::commit_update(self); |
3311 | at::functionalization::impl::sync(self); |
3312 | return self; |
3313 | } |
3314 | } |
3315 | |
3316 | at::Tensor & clamp_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max, at::Tensor & out) { |
3317 | if (false) { |
3318 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3319 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
3320 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
3321 | auto self_meta = to_meta(self); |
3322 | auto min_meta = to_meta(min); |
3323 | auto max_meta = to_meta(max); |
3324 | auto out_meta = to_meta(out); |
3325 | at::AutoDispatchSkipFunctionalize func_guard; |
3326 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3327 | at::_ops::clamp_Tensor_out::call(self_meta, min_meta, max_meta, out_meta); |
3328 | } |
3329 | |
3330 | at::Tensor self_; |
3331 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3332 | at::functionalization::impl::sync(self); |
3333 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3334 | } else { |
3335 | self_ = self; |
3336 | } |
3337 | |
3338 | c10::optional<at::Tensor> min_; |
3339 | if (at::functionalization::impl::isFunctionalTensor(min)) { |
3340 | at::functionalization::impl::sync(min); |
3341 | min_ = at::functionalization::impl::from_functional_tensor(min); |
3342 | } else { |
3343 | min_ = min; |
3344 | } |
3345 | |
3346 | c10::optional<at::Tensor> max_; |
3347 | if (at::functionalization::impl::isFunctionalTensor(max)) { |
3348 | at::functionalization::impl::sync(max); |
3349 | max_ = at::functionalization::impl::from_functional_tensor(max); |
3350 | } else { |
3351 | max_ = max; |
3352 | } |
3353 | |
3354 | at::Tensor out_; |
3355 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
3356 | at::functionalization::impl::sync(out); |
3357 | out_ = at::functionalization::impl::from_functional_tensor(out); |
3358 | } else { |
3359 | out_ = out; |
3360 | } |
3361 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
3362 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(min) || at::functionalization::impl::isFunctionalTensor(max))) { |
3363 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
3364 | TORCH_INTERNAL_ASSERT(false, |
3365 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3366 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3367 | } else { |
3368 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3369 | at::AutoDispatchSkipFunctionalize guard; |
3370 | at::Tensor tmp_output = at::_ops::clamp_Tensor_out::call(self_, min_, max_, out_); |
3371 | return out; |
3372 | } |
3373 | } else { |
3374 | at::Tensor tmp_output; |
3375 | { |
3376 | at::AutoDispatchSkipFunctionalize guard; |
3377 | tmp_output = at::_ops::clamp_Tensor::call(self_, min_, max_); |
3378 | } |
3379 | at::functionalization::impl::replace_(out, tmp_output); |
3380 | at::functionalization::impl::commit_update(out); |
3381 | at::functionalization::impl::sync(out); |
3382 | return out; |
3383 | } |
3384 | } |
3385 | |
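// In-place kernels such as clamp_ below mirror the out= pattern above, with two visible differences:
// the meta-tensor shape check is actually enabled for them (the leading branch is `if (true)`),
// and on the functional path the temporary result is committed back into `self` instead of `out`.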
3386 | at::Tensor & clamp__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) { |
3387 | if (true) { |
3388 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3389 | // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants. |
3390 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
3391 | auto self_meta = to_meta(self); |
3392 | auto min_meta = to_meta(min); |
3393 | auto max_meta = to_meta(max); |
3394 | at::AutoDispatchSkipFunctionalize func_guard; |
3395 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3396 | at::_ops::clamp__Tensor::call(self_meta, min_meta, max_meta); |
3397 | } |
3398 | |
3399 | at::Tensor self_; |
3400 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3401 | at::functionalization::impl::sync(self); |
3402 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3403 | } else { |
3404 | self_ = self; |
3405 | } |
3406 | |
3407 | c10::optional<at::Tensor> min_; |
3408 | if (at::functionalization::impl::isFunctionalTensor(min)) { |
3409 | at::functionalization::impl::sync(min); |
3410 | min_ = at::functionalization::impl::from_functional_tensor(min); |
3411 | } else { |
3412 | min_ = min; |
3413 | } |
3414 | |
3415 | c10::optional<at::Tensor> max_; |
3416 | if (at::functionalization::impl::isFunctionalTensor(max)) { |
3417 | at::functionalization::impl::sync(max); |
3418 | max_ = at::functionalization::impl::from_functional_tensor(max); |
3419 | } else { |
3420 | max_ = max; |
3421 | } |
3422 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
3423 | if ((false || at::functionalization::impl::isFunctionalTensor(min) || at::functionalization::impl::isFunctionalTensor(max))) { |
3424 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
3425 | TORCH_INTERNAL_ASSERT(false, |
3426 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3427 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3428 | } else { |
3429 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3430 | at::AutoDispatchSkipFunctionalize guard; |
3431 | at::Tensor tmp_output = at::_ops::clamp__Tensor::call(self_, min_, max_); |
3432 | return self; |
3433 | } |
3434 | } else { |
3435 | at::Tensor tmp_output; |
3436 | { |
3437 | at::AutoDispatchSkipFunctionalize guard; |
3438 | tmp_output = at::_ops::clamp_Tensor::call(self_, min_, max_); |
3439 | } |
3440 | at::functionalization::impl::replace_(self, tmp_output); |
3441 | at::functionalization::impl::commit_update(self); |
3442 | at::functionalization::impl::sync(self); |
3443 | return self; |
3444 | } |
3445 | } |
3446 | |
3447 | at::Tensor & _convolution_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, at::Tensor & out) { |
3448 | if (false) { |
3449 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3450 | // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants. |
3451 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
3452 | auto input_meta = to_meta(input); |
3453 | auto weight_meta = to_meta(weight); |
3454 | auto bias_meta = to_meta(bias); |
3455 | auto out_meta = to_meta(out); |
3456 | at::AutoDispatchSkipFunctionalize func_guard; |
3457 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3458 | at::_ops::_convolution_out::call(input_meta, weight_meta, bias_meta, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32, out_meta); |
3459 | } |
3460 | |
3461 | at::Tensor input_; |
3462 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
3463 | at::functionalization::impl::sync(input); |
3464 | input_ = at::functionalization::impl::from_functional_tensor(input); |
3465 | } else { |
3466 | input_ = input; |
3467 | } |
3468 | |
3469 | at::Tensor weight_; |
3470 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
3471 | at::functionalization::impl::sync(weight); |
3472 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
3473 | } else { |
3474 | weight_ = weight; |
3475 | } |
3476 | |
3477 | c10::optional<at::Tensor> bias_; |
3478 | if (at::functionalization::impl::isFunctionalTensor(bias)) { |
3479 | at::functionalization::impl::sync(bias); |
3480 | bias_ = at::functionalization::impl::from_functional_tensor(bias); |
3481 | } else { |
3482 | bias_ = bias; |
3483 | } |
3484 | |
3485 | at::Tensor out_; |
3486 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
3487 | at::functionalization::impl::sync(out); |
3488 | out_ = at::functionalization::impl::from_functional_tensor(out); |
3489 | } else { |
3490 | out_ = out; |
3491 | } |
3492 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
3493 | if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) { |
3494 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
3495 | TORCH_INTERNAL_ASSERT(false, |
3496 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3497 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3498 | } else { |
3499 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3500 | at::AutoDispatchSkipFunctionalize guard; |
3501 | at::Tensor tmp_output = at::_ops::_convolution_out::call(input_, weight_, bias_, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32, out_); |
3502 | return out; |
3503 | } |
3504 | } else { |
3505 | at::Tensor tmp_output; |
3506 | { |
3507 | at::AutoDispatchSkipFunctionalize guard; |
3508 | tmp_output = at::_ops::_convolution::call(input_, weight_, bias_, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32); |
3509 | } |
3510 | at::functionalization::impl::replace_(out, tmp_output); |
3511 | at::functionalization::impl::commit_update(out); |
3512 | at::functionalization::impl::sync(out); |
3513 | return out; |
3514 | } |
3515 | } |
3516 | |
3517 | at::Tensor & copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, bool non_blocking, at::Tensor & out) { |
3518 | if (false) { |
3519 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3520 | // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants. |
3521 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
3522 | auto self_meta = to_meta(self); |
3523 | auto src_meta = to_meta(src); |
3524 | auto out_meta = to_meta(out); |
3525 | at::AutoDispatchSkipFunctionalize func_guard; |
3526 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3527 | at::_ops::copy_out::call(self_meta, src_meta, non_blocking, out_meta); |
3528 | } |
3529 | |
3530 | at::Tensor self_; |
3531 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3532 | at::functionalization::impl::sync(self); |
3533 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3534 | } else { |
3535 | self_ = self; |
3536 | } |
3537 | |
3538 | at::Tensor src_; |
3539 | if (at::functionalization::impl::isFunctionalTensor(src)) { |
3540 | at::functionalization::impl::sync(src); |
3541 | src_ = at::functionalization::impl::from_functional_tensor(src); |
3542 | } else { |
3543 | src_ = src; |
3544 | } |
3545 | |
3546 | at::Tensor out_; |
3547 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
3548 | at::functionalization::impl::sync(out); |
3549 | out_ = at::functionalization::impl::from_functional_tensor(out); |
3550 | } else { |
3551 | out_ = out; |
3552 | } |
3553 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
3554 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(src))) { |
3555 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
3556 | TORCH_INTERNAL_ASSERT(false, |
3557 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3558 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3559 | } else { |
3560 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3561 | at::AutoDispatchSkipFunctionalize guard; |
3562 | at::Tensor tmp_output = at::_ops::copy_out::call(self_, src_, non_blocking, out_); |
3563 | return out; |
3564 | } |
3565 | } else { |
3566 | at::Tensor tmp_output; |
3567 | { |
3568 | at::AutoDispatchSkipFunctionalize guard; |
3569 | tmp_output = at::_ops::copy::call(self_, src_, non_blocking); |
3570 | } |
3571 | at::functionalization::impl::replace_(out, tmp_output); |
3572 | at::functionalization::impl::commit_update(out); |
3573 | at::functionalization::impl::sync(out); |
3574 | return out; |
3575 | } |
3576 | } |
3577 | |
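// copy_() follows the same in-place scheme: when `self` is a functional tensor, the kernel calls the
// functional at::_ops::copy op and commits the result into the wrapper; otherwise it simply
// redispatches to the regular copy_ kernel.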
3578 | at::Tensor & copy_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & src, bool non_blocking) { |
3579 | if (true) { |
3580 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3581 | // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants. |
3582 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
3583 | auto self_meta = to_meta(self); |
3584 | auto src_meta = to_meta(src); |
3585 | at::AutoDispatchSkipFunctionalize func_guard; |
3586 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3587 | at::_ops::copy_::call(self_meta, src_meta, non_blocking); |
3588 | } |
3589 | |
3590 | at::Tensor self_; |
3591 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3592 | at::functionalization::impl::sync(self); |
3593 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3594 | } else { |
3595 | self_ = self; |
3596 | } |
3597 | |
3598 | at::Tensor src_; |
3599 | if (at::functionalization::impl::isFunctionalTensor(src)) { |
3600 | at::functionalization::impl::sync(src); |
3601 | src_ = at::functionalization::impl::from_functional_tensor(src); |
3602 | } else { |
3603 | src_ = src; |
3604 | } |
3605 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
3606 | if ((false || at::functionalization::impl::isFunctionalTensor(src))) { |
3607 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
3608 | TORCH_INTERNAL_ASSERT(false, |
3609 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3610 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3611 | } else { |
3612 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3613 | at::AutoDispatchSkipFunctionalize guard; |
3614 | at::Tensor tmp_output = at::_ops::copy_::call(self_, src_, non_blocking); |
3615 | return self; |
3616 | } |
3617 | } else { |
3618 | at::Tensor tmp_output; |
3619 | { |
3620 | at::AutoDispatchSkipFunctionalize guard; |
3621 | tmp_output = at::_ops::copy::call(self_, src_, non_blocking); |
3622 | } |
3623 | at::functionalization::impl::replace_(self, tmp_output); |
3624 | at::functionalization::impl::commit_update(self); |
3625 | at::functionalization::impl::sync(self); |
3626 | return self; |
3627 | } |
3628 | } |
3629 | |
3630 | at::Tensor & cudnn_convolution_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out) { |
3631 | if (false) { |
3632 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3633 | // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants. |
3634 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
3635 | auto self_meta = to_meta(self); |
3636 | auto weight_meta = to_meta(weight); |
3637 | auto out_meta = to_meta(out); |
3638 | at::AutoDispatchSkipFunctionalize func_guard; |
3639 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3640 | at::_ops::cudnn_convolution_out::call(self_meta, weight_meta, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out_meta); |
3641 | } |
3642 | |
3643 | at::Tensor self_; |
3644 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3645 | at::functionalization::impl::sync(self); |
3646 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3647 | } else { |
3648 | self_ = self; |
3649 | } |
3650 | |
3651 | at::Tensor weight_; |
3652 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
3653 | at::functionalization::impl::sync(weight); |
3654 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
3655 | } else { |
3656 | weight_ = weight; |
3657 | } |
3658 | |
3659 | at::Tensor out_; |
3660 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
3661 | at::functionalization::impl::sync(out); |
3662 | out_ = at::functionalization::impl::from_functional_tensor(out); |
3663 | } else { |
3664 | out_ = out; |
3665 | } |
3666 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
3667 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight))) { |
3668 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
3669 | TORCH_INTERNAL_ASSERT(false, |
3670 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3671 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3672 | } else { |
3673 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3674 | at::AutoDispatchSkipFunctionalize guard; |
3675 | at::Tensor tmp_output = at::_ops::cudnn_convolution_out::call(self_, weight_, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out_); |
3676 | return out; |
3677 | } |
3678 | } else { |
3679 | at::Tensor tmp_output; |
3680 | { |
3681 | at::AutoDispatchSkipFunctionalize guard; |
3682 | tmp_output = at::_ops::cudnn_convolution::call(self_, weight_, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32); |
3683 | } |
3684 | at::functionalization::impl::replace_(out, tmp_output); |
3685 | at::functionalization::impl::commit_update(out); |
3686 | at::functionalization::impl::sync(out); |
3687 | return out; |
3688 | } |
3689 | } |
3690 | |
3691 | at::Tensor & divide_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
3692 | if (false) { |
3693 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3694 | // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants. |
3695 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
3696 | auto self_meta = to_meta(self); |
3697 | auto other_meta = to_meta(other); |
3698 | auto out_meta = to_meta(out); |
3699 | at::AutoDispatchSkipFunctionalize func_guard; |
3700 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3701 | at::_ops::divide_out::call(self_meta, other_meta, out_meta); |
3702 | } |
3703 | |
3704 | at::Tensor self_; |
3705 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3706 | at::functionalization::impl::sync(self); |
3707 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3708 | } else { |
3709 | self_ = self; |
3710 | } |
3711 | |
3712 | at::Tensor other_; |
3713 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
3714 | at::functionalization::impl::sync(other); |
3715 | other_ = at::functionalization::impl::from_functional_tensor(other); |
3716 | } else { |
3717 | other_ = other; |
3718 | } |
3719 | |
3720 | at::Tensor out_; |
3721 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
3722 | at::functionalization::impl::sync(out); |
3723 | out_ = at::functionalization::impl::from_functional_tensor(out); |
3724 | } else { |
3725 | out_ = out; |
3726 | } |
3727 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
3728 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
3729 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
3730 | TORCH_INTERNAL_ASSERT(false, |
3731 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3732 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3733 | } else { |
3734 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3735 | at::AutoDispatchSkipFunctionalize guard; |
3736 | at::Tensor tmp_output = at::_ops::divide_out::call(self_, other_, out_); |
3737 | return out; |
3738 | } |
3739 | } else { |
3740 | at::Tensor tmp_output; |
3741 | { |
3742 | at::AutoDispatchSkipFunctionalize guard; |
3743 | tmp_output = at::_ops::divide_Tensor::call(self_, other_); |
3744 | } |
3745 | at::functionalization::impl::replace_(out, tmp_output); |
3746 | at::functionalization::impl::commit_update(out); |
3747 | at::functionalization::impl::sync(out); |
3748 | return out; |
3749 | } |
3750 | } |
3751 | |
3752 | at::Tensor & divide__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { |
3753 | if (true) { |
3754 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3755 | // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants. |
3756 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
3757 | auto self_meta = to_meta(self); |
3758 | auto other_meta = to_meta(other); |
3759 | at::AutoDispatchSkipFunctionalize func_guard; |
3760 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3761 | at::_ops::divide__Tensor::call(self_meta, other_meta); |
3762 | } |
3763 | |
3764 | at::Tensor self_; |
3765 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3766 | at::functionalization::impl::sync(self); |
3767 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3768 | } else { |
3769 | self_ = self; |
3770 | } |
3771 | |
3772 | at::Tensor other_; |
3773 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
3774 | at::functionalization::impl::sync(other); |
3775 | other_ = at::functionalization::impl::from_functional_tensor(other); |
3776 | } else { |
3777 | other_ = other; |
3778 | } |
3779 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
3780 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
3781 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
3782 | TORCH_INTERNAL_ASSERT(false, |
3783 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3784 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3785 | } else { |
3786 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3787 | at::AutoDispatchSkipFunctionalize guard; |
3788 | at::Tensor tmp_output = at::_ops::divide__Tensor::call(self_, other_); |
3789 | return self; |
3790 | } |
3791 | } else { |
3792 | at::Tensor tmp_output; |
3793 | { |
3794 | at::AutoDispatchSkipFunctionalize guard; |
3795 | tmp_output = at::_ops::divide_Tensor::call(self_, other_); |
3796 | } |
3797 | at::functionalization::impl::replace_(self, tmp_output); |
3798 | at::functionalization::impl::commit_update(self); |
3799 | at::functionalization::impl::sync(self); |
3800 | return self; |
3801 | } |
3802 | } |
3803 | |
3804 | at::Tensor & divide_out_out_mode(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) { |
3805 | if (false) { |
3806 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3807 | // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants. |
3808 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
3809 | auto self_meta = to_meta(self); |
3810 | auto other_meta = to_meta(other); |
3811 | auto out_meta = to_meta(out); |
3812 | at::AutoDispatchSkipFunctionalize func_guard; |
3813 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3814 | at::_ops::divide_out_mode::call(self_meta, other_meta, rounding_mode, out_meta); |
3815 | } |
3816 | |
3817 | at::Tensor self_; |
3818 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3819 | at::functionalization::impl::sync(self); |
3820 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3821 | } else { |
3822 | self_ = self; |
3823 | } |
3824 | |
3825 | at::Tensor other_; |
3826 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
3827 | at::functionalization::impl::sync(other); |
3828 | other_ = at::functionalization::impl::from_functional_tensor(other); |
3829 | } else { |
3830 | other_ = other; |
3831 | } |
3832 | |
3833 | at::Tensor out_; |
3834 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
3835 | at::functionalization::impl::sync(out); |
3836 | out_ = at::functionalization::impl::from_functional_tensor(out); |
3837 | } else { |
3838 | out_ = out; |
3839 | } |
3840 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
3841 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
3842 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
3843 | TORCH_INTERNAL_ASSERT(false, |
3844 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3845 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3846 | } else { |
3847 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3848 | at::AutoDispatchSkipFunctionalize guard; |
3849 | at::Tensor tmp_output = at::_ops::divide_out_mode::call(self_, other_, rounding_mode, out_); |
3850 | return out; |
3851 | } |
3852 | } else { |
3853 | at::Tensor tmp_output; |
3854 | { |
3855 | at::AutoDispatchSkipFunctionalize guard; |
3856 | tmp_output = at::_ops::divide_Tensor_mode::call(self_, other_, rounding_mode); |
3857 | } |
3858 | at::functionalization::impl::replace_(out, tmp_output); |
3859 | at::functionalization::impl::commit_update(out); |
3860 | at::functionalization::impl::sync(out); |
3861 | return out; |
3862 | } |
3863 | } |
3864 | |
3865 | at::Tensor & divide__Tensor_mode(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) { |
3866 | if (true) { |
3867 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3868 | // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants. |
3869 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
3870 | auto self_meta = to_meta(self); |
3871 | auto other_meta = to_meta(other); |
3872 | at::AutoDispatchSkipFunctionalize func_guard; |
3873 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3874 | at::_ops::divide__Tensor_mode::call(self_meta, other_meta, rounding_mode); |
3875 | } |
3876 | |
3877 | at::Tensor self_; |
3878 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3879 | at::functionalization::impl::sync(self); |
3880 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3881 | } else { |
3882 | self_ = self; |
3883 | } |
3884 | |
3885 | at::Tensor other_; |
3886 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
3887 | at::functionalization::impl::sync(other); |
3888 | other_ = at::functionalization::impl::from_functional_tensor(other); |
3889 | } else { |
3890 | other_ = other; |
3891 | } |
3892 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
3893 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
3894 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
3895 | TORCH_INTERNAL_ASSERT(false, |
3896 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3897 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3898 | } else { |
3899 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3900 | at::AutoDispatchSkipFunctionalize guard; |
3901 | at::Tensor tmp_output = at::_ops::divide__Tensor_mode::call(self_, other_, rounding_mode); |
3902 | return self; |
3903 | } |
3904 | } else { |
3905 | at::Tensor tmp_output; |
3906 | { |
3907 | at::AutoDispatchSkipFunctionalize guard; |
3908 | tmp_output = at::_ops::divide_Tensor_mode::call(self_, other_, rounding_mode); |
3909 | } |
3910 | at::functionalization::impl::replace_(self, tmp_output); |
3911 | at::functionalization::impl::commit_update(self); |
3912 | at::functionalization::impl::sync(self); |
3913 | return self; |
3914 | } |
3915 | } |
3916 | |
3917 | at::Tensor & true_divide_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
3918 | if (false) { |
3919 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3920 | // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants. |
3921 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
3922 | auto self_meta = to_meta(self); |
3923 | auto other_meta = to_meta(other); |
3924 | auto out_meta = to_meta(out); |
3925 | at::AutoDispatchSkipFunctionalize func_guard; |
3926 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3927 | at::_ops::true_divide_out::call(self_meta, other_meta, out_meta); |
3928 | } |
3929 | |
3930 | at::Tensor self_; |
3931 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3932 | at::functionalization::impl::sync(self); |
3933 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3934 | } else { |
3935 | self_ = self; |
3936 | } |
3937 | |
3938 | at::Tensor other_; |
3939 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
3940 | at::functionalization::impl::sync(other); |
3941 | other_ = at::functionalization::impl::from_functional_tensor(other); |
3942 | } else { |
3943 | other_ = other; |
3944 | } |
3945 | |
3946 | at::Tensor out_; |
3947 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
3948 | at::functionalization::impl::sync(out); |
3949 | out_ = at::functionalization::impl::from_functional_tensor(out); |
3950 | } else { |
3951 | out_ = out; |
3952 | } |
3953 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
3954 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
3955 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
3956 | TORCH_INTERNAL_ASSERT(false, |
3957 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3958 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3959 | } else { |
3960 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3961 | at::AutoDispatchSkipFunctionalize guard; |
3962 | at::Tensor tmp_output = at::_ops::true_divide_out::call(self_, other_, out_); |
3963 | return out; |
3964 | } |
3965 | } else { |
3966 | at::Tensor tmp_output; |
3967 | { |
3968 | at::AutoDispatchSkipFunctionalize guard; |
3969 | tmp_output = at::_ops::true_divide_Tensor::call(self_, other_); |
3970 | } |
3971 | at::functionalization::impl::replace_(out, tmp_output); |
3972 | at::functionalization::impl::commit_update(out); |
3973 | at::functionalization::impl::sync(out); |
3974 | return out; |
3975 | } |
3976 | } |
3977 | |
3978 | at::Tensor & true_divide__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { |
3979 | if (true) { |
3980 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3981 | // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants. |
3982 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
3983 | auto self_meta = to_meta(self); |
3984 | auto other_meta = to_meta(other); |
3985 | at::AutoDispatchSkipFunctionalize func_guard; |
3986 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3987 | at::_ops::true_divide__Tensor::call(self_meta, other_meta); |
3988 | } |
3989 | |
3990 | at::Tensor self_; |
3991 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3992 | at::functionalization::impl::sync(self); |
3993 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3994 | } else { |
3995 | self_ = self; |
3996 | } |
3997 | |
3998 | at::Tensor other_; |
3999 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
4000 | at::functionalization::impl::sync(other); |
4001 | other_ = at::functionalization::impl::from_functional_tensor(other); |
4002 | } else { |
4003 | other_ = other; |
4004 | } |
4005 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
4006 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
4007 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
4008 | TORCH_INTERNAL_ASSERT(false, |
4009 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4010 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4011 | } else { |
4012 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4013 | at::AutoDispatchSkipFunctionalize guard; |
4014 | at::Tensor tmp_output = at::_ops::true_divide__Tensor::call(self_, other_); |
4015 | return self; |
4016 | } |
4017 | } else { |
4018 | at::Tensor tmp_output; |
4019 | { |
4020 | at::AutoDispatchSkipFunctionalize guard; |
4021 | tmp_output = at::_ops::true_divide_Tensor::call(self_, other_); |
4022 | } |
4023 | at::functionalization::impl::replace_(self, tmp_output); |
4024 | at::functionalization::impl::commit_update(self); |
4025 | at::functionalization::impl::sync(self); |
4026 | return self; |
4027 | } |
4028 | } |
4029 | |
4030 | at::Tensor & dot_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor, at::Tensor & out) { |
4031 | if (false) { |
4032 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4033 | // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants. |
4034 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
4035 | auto self_meta = to_meta(self); |
4036 | auto tensor_meta = to_meta(tensor); |
4037 | auto out_meta = to_meta(out); |
4038 | at::AutoDispatchSkipFunctionalize func_guard; |
4039 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4040 | at::_ops::dot_out::call(self_meta, tensor_meta, out_meta); |
4041 | } |
4042 | |
4043 | at::Tensor self_; |
4044 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4045 | at::functionalization::impl::sync(self); |
4046 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4047 | } else { |
4048 | self_ = self; |
4049 | } |
4050 | |
4051 | at::Tensor tensor_; |
4052 | if (at::functionalization::impl::isFunctionalTensor(tensor)) { |
4053 | at::functionalization::impl::sync(tensor); |
4054 | tensor_ = at::functionalization::impl::from_functional_tensor(tensor); |
4055 | } else { |
4056 | tensor_ = tensor; |
4057 | } |
4058 | |
4059 | at::Tensor out_; |
4060 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4061 | at::functionalization::impl::sync(out); |
4062 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4063 | } else { |
4064 | out_ = out; |
4065 | } |
4066 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4067 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(tensor))) { |
4068 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
4069 | TORCH_INTERNAL_ASSERT(false, |
4070 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4071 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4072 | } else { |
4073 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4074 | at::AutoDispatchSkipFunctionalize guard; |
4075 | at::Tensor tmp_output = at::_ops::dot_out::call(self_, tensor_, out_); |
4076 | return out; |
4077 | } |
4078 | } else { |
4079 | at::Tensor tmp_output; |
4080 | { |
4081 | at::AutoDispatchSkipFunctionalize guard; |
4082 | tmp_output = at::_ops::dot::call(self_, tensor_); |
4083 | } |
4084 | at::functionalization::impl::replace_(out, tmp_output); |
4085 | at::functionalization::impl::commit_update(out); |
4086 | at::functionalization::impl::sync(out); |
4087 | return out; |
4088 | } |
4089 | } |
4090 | |
4091 | at::Tensor & vdot_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
4092 | if (false) { |
4093 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4094 | // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants. |
4095 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
4096 | auto self_meta = to_meta(self); |
4097 | auto other_meta = to_meta(other); |
4098 | auto out_meta = to_meta(out); |
4099 | at::AutoDispatchSkipFunctionalize func_guard; |
4100 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4101 | at::_ops::vdot_out::call(self_meta, other_meta, out_meta); |
4102 | } |
4103 | |
4104 | at::Tensor self_; |
4105 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4106 | at::functionalization::impl::sync(self); |
4107 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4108 | } else { |
4109 | self_ = self; |
4110 | } |
4111 | |
4112 | at::Tensor other_; |
4113 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
4114 | at::functionalization::impl::sync(other); |
4115 | other_ = at::functionalization::impl::from_functional_tensor(other); |
4116 | } else { |
4117 | other_ = other; |
4118 | } |
4119 | |
4120 | at::Tensor out_; |
4121 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4122 | at::functionalization::impl::sync(out); |
4123 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4124 | } else { |
4125 | out_ = out; |
4126 | } |
4127 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4128 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
4129 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
4130 | TORCH_INTERNAL_ASSERT(false, |
4131 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4132 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4133 | } else { |
4134 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4135 | at::AutoDispatchSkipFunctionalize guard; |
4136 | at::Tensor tmp_output = at::_ops::vdot_out::call(self_, other_, out_); |
4137 | return out; |
4138 | } |
4139 | } else { |
4140 | at::Tensor tmp_output; |
4141 | { |
4142 | at::AutoDispatchSkipFunctionalize guard; |
4143 | tmp_output = at::_ops::vdot::call(self_, other_); |
4144 | } |
4145 | at::functionalization::impl::replace_(out, tmp_output); |
4146 | at::functionalization::impl::commit_update(out); |
4147 | at::functionalization::impl::sync(out); |
4148 | return out; |
4149 | } |
4150 | } |
4151 | |
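// Ops that take an at::TensorList (row_stack below) unwrap the list argument into a
// ::std::vector<at::Tensor>: from_functional_tensor() is applied to the whole list when it holds
// functional tensors, and tensors.vec() materializes it otherwise.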
4152 | at::Tensor & row_stack_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) { |
4153 | if (false) { |
4154 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4155 | // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants. |
4156 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
4157 | auto tensors_meta = to_meta(tensors); |
4158 | auto out_meta = to_meta(out); |
4159 | at::AutoDispatchSkipFunctionalize func_guard; |
4160 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4161 | at::_ops::row_stack_out::call(tensors_meta, out_meta); |
4162 | } |
4163 | |
4164 | ::std::vector<at::Tensor> tensors_; |
4165 | if (at::functionalization::impl::isFunctionalTensor(tensors)) { |
4166 | at::functionalization::impl::sync(tensors); |
4167 | tensors_ = at::functionalization::impl::from_functional_tensor(tensors); |
4168 | } else { |
4169 | tensors_ = tensors.vec(); |
4170 | } |
4171 | |
4172 | at::Tensor out_; |
4173 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4174 | at::functionalization::impl::sync(out); |
4175 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4176 | } else { |
4177 | out_ = out; |
4178 | } |
4179 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4180 | if ((false || at::functionalization::impl::isFunctionalTensor(tensors))) { |
4181 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
4182 | TORCH_INTERNAL_ASSERT(false, |
4183 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4184 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4185 | } else { |
4186 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4187 | at::AutoDispatchSkipFunctionalize guard; |
4188 | at::Tensor tmp_output = at::_ops::row_stack_out::call(tensors_, out_); |
4189 | return out; |
4190 | } |
4191 | } else { |
4192 | at::Tensor tmp_output; |
4193 | { |
4194 | at::AutoDispatchSkipFunctionalize guard; |
4195 | tmp_output = at::_ops::row_stack::call(tensors_); |
4196 | } |
4197 | at::functionalization::impl::replace_(out, tmp_output); |
4198 | at::functionalization::impl::commit_update(out); |
4199 | at::functionalization::impl::sync(out); |
4200 | return out; |
4201 | } |
4202 | } |
4203 | |
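// Factory-style out= ops (new_empty, new_empty_strided, full_like, ...) have no functional overload
// that accepts `out`, so on the functional path the dtype, layout, and device are read off `out_`
// (plus a trailing c10::nullopt) before calling the functional op and committing the result into `out`.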
4204 | at::Tensor & new_empty_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) { |
4205 | if (false) { |
4206 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4207 | // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants. |
4208 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
4209 | auto self_meta = to_meta(self); |
4210 | auto out_meta = to_meta(out); |
4211 | at::AutoDispatchSkipFunctionalize func_guard; |
4212 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4213 | at::_ops::new_empty_out::call(self_meta, size, out_meta); |
4214 | } |
4215 | |
4216 | at::Tensor self_; |
4217 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4218 | at::functionalization::impl::sync(self); |
4219 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4220 | } else { |
4221 | self_ = self; |
4222 | } |
4223 | |
4224 | at::Tensor out_; |
4225 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4226 | at::functionalization::impl::sync(out); |
4227 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4228 | } else { |
4229 | out_ = out; |
4230 | } |
4231 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4232 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
4233 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
4234 | TORCH_INTERNAL_ASSERT(false, |
4235 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4236 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4237 | } else { |
4238 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4239 | at::AutoDispatchSkipFunctionalize guard; |
4240 | at::Tensor tmp_output = at::_ops::new_empty_out::call(self_, size, out_); |
4241 | return out; |
4242 | } |
4243 | } else { |
4244 | at::Tensor tmp_output; |
4245 | { |
4246 | at::AutoDispatchSkipFunctionalize guard; |
4247 | tmp_output = at::_ops::new_empty::call(self_, size, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
4248 | } |
4249 | at::functionalization::impl::replace_(out, tmp_output); |
4250 | at::functionalization::impl::commit_update(out); |
4251 | at::functionalization::impl::sync(out); |
4252 | return out; |
4253 | } |
4254 | } |
4255 | |
4256 | at::Tensor & new_empty_strided_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) { |
4257 | if (false) { |
4258 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4259 | // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants. |
4260 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
4261 | auto self_meta = to_meta(self); |
4262 | auto out_meta = to_meta(out); |
4263 | at::AutoDispatchSkipFunctionalize func_guard; |
4264 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4265 | at::_ops::new_empty_strided_out::call(self_meta, size, stride, out_meta); |
4266 | } |
4267 | |
4268 | at::Tensor self_; |
4269 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4270 | at::functionalization::impl::sync(self); |
4271 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4272 | } else { |
4273 | self_ = self; |
4274 | } |
4275 | |
4276 | at::Tensor out_; |
4277 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4278 | at::functionalization::impl::sync(out); |
4279 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4280 | } else { |
4281 | out_ = out; |
4282 | } |
4283 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4284 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
4285 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
4286 | TORCH_INTERNAL_ASSERT(false, |
4287 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4288 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4289 | } else { |
4290 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4291 | at::AutoDispatchSkipFunctionalize guard; |
4292 | at::Tensor tmp_output = at::_ops::new_empty_strided_out::call(self_, size, stride, out_); |
4293 | return out; |
4294 | } |
4295 | } else { |
4296 | at::Tensor tmp_output; |
4297 | { |
4298 | at::AutoDispatchSkipFunctionalize guard; |
4299 | tmp_output = at::_ops::new_empty_strided::call(self_, size, stride, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
4300 | } |
4301 | at::functionalization::impl::replace_(out, tmp_output); |
4302 | at::functionalization::impl::commit_update(out); |
4303 | at::functionalization::impl::sync(out); |
4304 | return out; |
4305 | } |
4306 | } |
4307 | |
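// _empty_affine_quantized.out takes no tensor inputs besides `out`, so the "mutating a non-functional
// tensor with a functional tensor" check degenerates to `if ((false))`; the kernel either redispatches
// directly or rebuilds the quantized tensor from out_'s options on the functional path.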
4308 | at::Tensor & _empty_affine_quantized_out_out(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { |
4309 | if (false) { |
4310 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4311 | // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants. |
4312 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
4313 | auto out_meta = to_meta(out); |
4314 | at::AutoDispatchSkipFunctionalize func_guard; |
4315 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4316 | at::_ops::_empty_affine_quantized_out::call(size, scale, zero_point, memory_format, out_meta); |
4317 | } |
4318 | |
4319 | at::Tensor out_; |
4320 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4321 | at::functionalization::impl::sync(out); |
4322 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4323 | } else { |
4324 | out_ = out; |
4325 | } |
4326 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4327 | if ((false)) { |
4328 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
4329 | TORCH_INTERNAL_ASSERT(false, |
4330 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4331 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4332 | } else { |
4333 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4334 | at::AutoDispatchSkipFunctionalize guard; |
4335 | at::Tensor tmp_output = at::_ops::_empty_affine_quantized_out::call(size, scale, zero_point, memory_format, out_); |
4336 | return out; |
4337 | } |
4338 | } else { |
4339 | at::Tensor tmp_output; |
4340 | { |
4341 | at::AutoDispatchSkipFunctionalize guard; |
4342 | tmp_output = at::_ops::_empty_affine_quantized::call(size, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt, scale, zero_point, memory_format); |
4343 | } |
4344 | at::functionalization::impl::replace_(out, tmp_output); |
4345 | at::functionalization::impl::commit_update(out); |
4346 | at::functionalization::impl::sync(out); |
4347 | return out; |
4348 | } |
4349 | } |
4350 | |
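// _resize_output and its out= variant return a `const at::Tensor &`; aside from the const-reference
// signature they follow the same unwrap / redispatch-or-commit flow as the kernels above.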
4351 | const at::Tensor & _resize_output_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::Device device, const at::Tensor & out) { |
4352 | if (false) { |
4353 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4354 | // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants. |
4355 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
4356 | auto self_meta = to_meta(self); |
4357 | auto out_meta = to_meta(out); |
4358 | at::AutoDispatchSkipFunctionalize func_guard; |
4359 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4360 | at::_ops::_resize_output_out::call(self_meta, size, device, out_meta); |
4361 | } |
4362 | |
4363 | at::Tensor self_; |
4364 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4365 | at::functionalization::impl::sync(self); |
4366 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4367 | } else { |
4368 | self_ = self; |
4369 | } |
4370 | |
4371 | at::Tensor out_; |
4372 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4373 | at::functionalization::impl::sync(out); |
4374 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4375 | } else { |
4376 | out_ = out; |
4377 | } |
4378 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4379 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
4380 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
4381 | TORCH_INTERNAL_ASSERT(false, |
4382 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4383 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4384 | } else { |
4385 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4386 | at::AutoDispatchSkipFunctionalize guard; |
4387 | at::Tensor tmp_output = at::_ops::_resize_output_out::call(self_, size, device, out_); |
4388 | return out; |
4389 | } |
4390 | } else { |
4391 | at::Tensor tmp_output; |
4392 | { |
4393 | at::AutoDispatchSkipFunctionalize guard; |
4394 | tmp_output = at::_ops::_resize_output::call(self_, size, device); |
4395 | } |
4396 | at::functionalization::impl::replace_(out, tmp_output); |
4397 | at::functionalization::impl::commit_update(out); |
4398 | at::functionalization::impl::sync(out); |
4399 | return out; |
4400 | } |
4401 | } |
4402 | |
4403 | const at::Tensor & _resize_output_(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::Device device) { |
4404 | if (true) { |
4405 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4406 | // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants. |
4407 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
4408 | auto self_meta = to_meta(self); |
4409 | at::AutoDispatchSkipFunctionalize func_guard; |
4410 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4411 | at::_ops::_resize_output_::call(self_meta, size, device); |
4412 | } |
4413 | |
4414 | at::Tensor self_; |
4415 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4416 | at::functionalization::impl::sync(self); |
4417 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4418 | } else { |
4419 | self_ = self; |
4420 | } |
4421 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
4422 | if ((false)) { |
4423 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
4424 | TORCH_INTERNAL_ASSERT(false, |
4425 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4426 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4427 | } else { |
4428 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4429 | at::AutoDispatchSkipFunctionalize guard; |
4430 | at::Tensor tmp_output = at::_ops::_resize_output_::call(self_, size, device); |
4431 | return self; |
4432 | } |
4433 | } else { |
4434 | at::Tensor tmp_output; |
4435 | { |
4436 | at::AutoDispatchSkipFunctionalize guard; |
4437 | tmp_output = at::_ops::_resize_output::call(self_, size, device); |
4438 | } |
4439 | at::functionalization::impl::replace_(self, tmp_output); |
4440 | at::functionalization::impl::commit_update(self); |
4441 | at::functionalization::impl::sync(self); |
4442 | return self; |
4443 | } |
4444 | } |
4445 | |
4446 | at::Tensor & frac_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
4447 | if (false) { |
4448 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4449 | // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants. |
4450 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
4451 | auto self_meta = to_meta(self); |
4452 | auto out_meta = to_meta(out); |
4453 | at::AutoDispatchSkipFunctionalize func_guard; |
4454 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4455 | at::_ops::frac_out::call(self_meta, out_meta); |
4456 | } |
4457 | |
4458 | at::Tensor self_; |
4459 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4460 | at::functionalization::impl::sync(self); |
4461 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4462 | } else { |
4463 | self_ = self; |
4464 | } |
4465 | |
4466 | at::Tensor out_; |
4467 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4468 | at::functionalization::impl::sync(out); |
4469 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4470 | } else { |
4471 | out_ = out; |
4472 | } |
4473 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4474 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
4475 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
4476 | TORCH_INTERNAL_ASSERT(false, |
4477 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4478 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4479 | } else { |
4480 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4481 | at::AutoDispatchSkipFunctionalize guard; |
4482 | at::Tensor tmp_output = at::_ops::frac_out::call(self_, out_); |
4483 | return out; |
4484 | } |
4485 | } else { |
4486 | at::Tensor tmp_output; |
4487 | { |
4488 | at::AutoDispatchSkipFunctionalize guard; |
4489 | tmp_output = at::_ops::frac::call(self_); |
4490 | } |
4491 | at::functionalization::impl::replace_(out, tmp_output); |
4492 | at::functionalization::impl::commit_update(out); |
4493 | at::functionalization::impl::sync(out); |
4494 | return out; |
4495 | } |
4496 | } |
4497 | |
4498 | at::Tensor & frac_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
4499 | if (true) { |
4500 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4501 | // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants. |
4502 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
4503 | auto self_meta = to_meta(self); |
4504 | at::AutoDispatchSkipFunctionalize func_guard; |
4505 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4506 | at::_ops::frac_::call(self_meta); |
4507 | } |
4508 | |
4509 | at::Tensor self_; |
4510 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4511 | at::functionalization::impl::sync(self); |
4512 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4513 | } else { |
4514 | self_ = self; |
4515 | } |
4516 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
4517 | if ((false)) { |
4518 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
4519 | TORCH_INTERNAL_ASSERT(false, |
4520 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4521 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4522 | } else { |
4523 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4524 | at::AutoDispatchSkipFunctionalize guard; |
4525 | at::Tensor tmp_output = at::_ops::frac_::call(self_); |
4526 | return self;
4527 | } |
4528 | } else { |
4529 | at::Tensor tmp_output; |
4530 | { |
4531 | at::AutoDispatchSkipFunctionalize guard; |
4532 | tmp_output = at::_ops::frac::call(self_); |
4533 | } |
4534 | at::functionalization::impl::replace_(self, tmp_output); |
4535 | at::functionalization::impl::commit_update(self); |
4536 | at::functionalization::impl::sync(self); |
4537 | return self; |
4538 | } |
4539 | } |
4540 | |
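// For factory-style out= ops (full_like, hann_window, kaiser_window, ...) the
// functional variant takes TensorOptions-style arguments instead of an `out`
// tensor, so the kernel forwards out_.scalar_type(), out_.layout() and
// out_.device() explicitly and leaves the remaining pin_memory option as
// c10::nullopt.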
4541 | at::Tensor & full_like_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & fill_value, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { |
4542 | if (false) { |
4543 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4544 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
4545 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
4546 | auto self_meta = to_meta(self); |
4547 | auto out_meta = to_meta(out); |
4548 | at::AutoDispatchSkipFunctionalize func_guard; |
4549 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4550 | at::_ops::full_like_out::call(self_meta, fill_value, memory_format, out_meta); |
4551 | } |
4552 | |
4553 | at::Tensor self_; |
4554 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4555 | at::functionalization::impl::sync(self); |
4556 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4557 | } else { |
4558 | self_ = self; |
4559 | } |
4560 | |
4561 | at::Tensor out_; |
4562 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4563 | at::functionalization::impl::sync(out); |
4564 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4565 | } else { |
4566 | out_ = out; |
4567 | } |
4568 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4569 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
4570 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
4571 | TORCH_INTERNAL_ASSERT(false, |
4572 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4573 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4574 | } else { |
4575 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4576 | at::AutoDispatchSkipFunctionalize guard; |
4577 | at::Tensor tmp_output = at::_ops::full_like_out::call(self_, fill_value, memory_format, out_); |
4578 | return out;
4579 | } |
4580 | } else { |
4581 | at::Tensor tmp_output; |
4582 | { |
4583 | at::AutoDispatchSkipFunctionalize guard; |
4584 | tmp_output = at::_ops::full_like::call(self_, fill_value, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt, memory_format); |
4585 | } |
4586 | at::functionalization::impl::replace_(out, tmp_output); |
4587 | at::functionalization::impl::commit_update(out); |
4588 | at::functionalization::impl::sync(out); |
4589 | return out; |
4590 | } |
4591 | } |
4592 | |
4593 | at::Tensor & lcm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
4594 | if (false) { |
4595 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4596 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
4597 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
4598 | auto self_meta = to_meta(self); |
4599 | auto other_meta = to_meta(other); |
4600 | auto out_meta = to_meta(out); |
4601 | at::AutoDispatchSkipFunctionalize func_guard; |
4602 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4603 | at::_ops::lcm_out::call(self_meta, other_meta, out_meta); |
4604 | } |
4605 | |
4606 | at::Tensor self_; |
4607 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4608 | at::functionalization::impl::sync(self); |
4609 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4610 | } else { |
4611 | self_ = self; |
4612 | } |
4613 | |
4614 | at::Tensor other_; |
4615 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
4616 | at::functionalization::impl::sync(other); |
4617 | other_ = at::functionalization::impl::from_functional_tensor(other); |
4618 | } else { |
4619 | other_ = other; |
4620 | } |
4621 | |
4622 | at::Tensor out_; |
4623 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4624 | at::functionalization::impl::sync(out); |
4625 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4626 | } else { |
4627 | out_ = out; |
4628 | } |
4629 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4630 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
4631 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
4632 | TORCH_INTERNAL_ASSERT(false, |
4633 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4634 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4635 | } else { |
4636 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4637 | at::AutoDispatchSkipFunctionalize guard; |
4638 | at::Tensor tmp_output = at::_ops::lcm_out::call(self_, other_, out_); |
4639 | return out;
4640 | } |
4641 | } else { |
4642 | at::Tensor tmp_output; |
4643 | { |
4644 | at::AutoDispatchSkipFunctionalize guard; |
4645 | tmp_output = at::_ops::lcm::call(self_, other_); |
4646 | } |
4647 | at::functionalization::impl::replace_(out, tmp_output); |
4648 | at::functionalization::impl::commit_update(out); |
4649 | at::functionalization::impl::sync(out); |
4650 | return out; |
4651 | } |
4652 | } |
4653 | |
4654 | at::Tensor & lcm_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { |
4655 | if (true) { |
4656 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4657 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
4658 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
4659 | auto self_meta = to_meta(self); |
4660 | auto other_meta = to_meta(other); |
4661 | at::AutoDispatchSkipFunctionalize func_guard; |
4662 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4663 | at::_ops::lcm_::call(self_meta, other_meta); |
4664 | } |
4665 | |
4666 | at::Tensor self_; |
4667 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4668 | at::functionalization::impl::sync(self); |
4669 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4670 | } else { |
4671 | self_ = self; |
4672 | } |
4673 | |
4674 | at::Tensor other_; |
4675 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
4676 | at::functionalization::impl::sync(other); |
4677 | other_ = at::functionalization::impl::from_functional_tensor(other); |
4678 | } else { |
4679 | other_ = other; |
4680 | } |
4681 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
4682 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
4683 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
4684 | TORCH_INTERNAL_ASSERT(false, |
4685 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4686 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4687 | } else { |
4688 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4689 | at::AutoDispatchSkipFunctionalize guard; |
4690 | at::Tensor tmp_output = at::_ops::lcm_::call(self_, other_); |
4691 | return self;
4692 | } |
4693 | } else { |
4694 | at::Tensor tmp_output; |
4695 | { |
4696 | at::AutoDispatchSkipFunctionalize guard; |
4697 | tmp_output = at::_ops::lcm::call(self_, other_); |
4698 | } |
4699 | at::functionalization::impl::replace_(self, tmp_output); |
4700 | at::functionalization::impl::commit_update(self); |
4701 | at::functionalization::impl::sync(self); |
4702 | return self; |
4703 | } |
4704 | } |
4705 | |
4706 | at::Tensor & hann_window_out_out(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out) { |
4707 | if (false) { |
4708 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4709 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
4710 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
4711 | auto out_meta = to_meta(out); |
4712 | at::AutoDispatchSkipFunctionalize func_guard; |
4713 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4714 | at::_ops::hann_window_out::call(window_length, out_meta); |
4715 | } |
4716 | |
4717 | at::Tensor out_; |
4718 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4719 | at::functionalization::impl::sync(out); |
4720 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4721 | } else { |
4722 | out_ = out; |
4723 | } |
4724 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4725 | if ((false)) { |
4726 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
4727 | TORCH_INTERNAL_ASSERT(false, |
4728 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4729 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4730 | } else { |
4731 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4732 | at::AutoDispatchSkipFunctionalize guard; |
4733 | at::Tensor tmp_output = at::_ops::hann_window_out::call(window_length, out_); |
4734 | return out;
4735 | } |
4736 | } else { |
4737 | at::Tensor tmp_output; |
4738 | { |
4739 | at::AutoDispatchSkipFunctionalize guard; |
4740 | tmp_output = at::_ops::hann_window::call(window_length, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
4741 | } |
4742 | at::functionalization::impl::replace_(out, tmp_output); |
4743 | at::functionalization::impl::commit_update(out); |
4744 | at::functionalization::impl::sync(out); |
4745 | return out; |
4746 | } |
4747 | } |
4748 | |
4749 | at::Tensor & hann_window_out_periodic_out(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out) { |
4750 | if (false) { |
4751 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4752 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
4753 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
4754 | auto out_meta = to_meta(out); |
4755 | at::AutoDispatchSkipFunctionalize func_guard; |
4756 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4757 | at::_ops::hann_window_periodic_out::call(window_length, periodic, out_meta); |
4758 | } |
4759 | |
4760 | at::Tensor out_; |
4761 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4762 | at::functionalization::impl::sync(out); |
4763 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4764 | } else { |
4765 | out_ = out; |
4766 | } |
4767 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4768 | if ((false)) { |
4769 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
4770 | TORCH_INTERNAL_ASSERT(false, |
4771 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4772 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4773 | } else { |
4774 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4775 | at::AutoDispatchSkipFunctionalize guard; |
4776 | at::Tensor tmp_output = at::_ops::hann_window_periodic_out::call(window_length, periodic, out_); |
4777 | return out;
4778 | } |
4779 | } else { |
4780 | at::Tensor tmp_output; |
4781 | { |
4782 | at::AutoDispatchSkipFunctionalize guard; |
4783 | tmp_output = at::_ops::hann_window_periodic::call(window_length, periodic, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
4784 | } |
4785 | at::functionalization::impl::replace_(out, tmp_output); |
4786 | at::functionalization::impl::commit_update(out); |
4787 | at::functionalization::impl::sync(out); |
4788 | return out; |
4789 | } |
4790 | } |
4791 | |
4792 | at::Tensor & kaiser_window_out_out(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out) { |
4793 | if (false) { |
4794 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4795 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
4796 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
4797 | auto out_meta = to_meta(out); |
4798 | at::AutoDispatchSkipFunctionalize func_guard; |
4799 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4800 | at::_ops::kaiser_window_out::call(window_length, out_meta); |
4801 | } |
4802 | |
4803 | at::Tensor out_; |
4804 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4805 | at::functionalization::impl::sync(out); |
4806 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4807 | } else { |
4808 | out_ = out; |
4809 | } |
4810 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4811 | if ((false)) { |
4812 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
4813 | TORCH_INTERNAL_ASSERT(false, |
4814 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4815 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4816 | } else { |
4817 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4818 | at::AutoDispatchSkipFunctionalize guard; |
4819 | at::Tensor tmp_output = at::_ops::kaiser_window_out::call(window_length, out_); |
4820 | return out;
4821 | } |
4822 | } else { |
4823 | at::Tensor tmp_output; |
4824 | { |
4825 | at::AutoDispatchSkipFunctionalize guard; |
4826 | tmp_output = at::_ops::kaiser_window::call(window_length, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
4827 | } |
4828 | at::functionalization::impl::replace_(out, tmp_output); |
4829 | at::functionalization::impl::commit_update(out); |
4830 | at::functionalization::impl::sync(out); |
4831 | return out; |
4832 | } |
4833 | } |
4834 | |
4835 | at::Tensor & kaiser_window_out_periodic_out(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out) { |
4836 | if (false) { |
4837 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4838 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
4839 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
4840 | auto out_meta = to_meta(out); |
4841 | at::AutoDispatchSkipFunctionalize func_guard; |
4842 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4843 | at::_ops::kaiser_window_periodic_out::call(window_length, periodic, out_meta); |
4844 | } |
4845 | |
4846 | at::Tensor out_; |
4847 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4848 | at::functionalization::impl::sync(out); |
4849 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4850 | } else { |
4851 | out_ = out; |
4852 | } |
4853 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4854 | if ((false)) { |
4855 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
4856 | TORCH_INTERNAL_ASSERT(false, |
4857 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4858 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4859 | } else { |
4860 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4861 | at::AutoDispatchSkipFunctionalize guard; |
4862 | at::Tensor tmp_output = at::_ops::kaiser_window_periodic_out::call(window_length, periodic, out_); |
4863 | return out;
4864 | } |
4865 | } else { |
4866 | at::Tensor tmp_output; |
4867 | { |
4868 | at::AutoDispatchSkipFunctionalize guard; |
4869 | tmp_output = at::_ops::kaiser_window_periodic::call(window_length, periodic, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
4870 | } |
4871 | at::functionalization::impl::replace_(out, tmp_output); |
4872 | at::functionalization::impl::commit_update(out); |
4873 | at::functionalization::impl::sync(out); |
4874 | return out; |
4875 | } |
4876 | } |
4877 | |
4878 | at::Tensor & kaiser_window_out_beta_out(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double beta, at::Tensor & out) { |
4879 | if (false) { |
4880 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4881 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
4882 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
4883 | auto out_meta = to_meta(out); |
4884 | at::AutoDispatchSkipFunctionalize func_guard; |
4885 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4886 | at::_ops::kaiser_window_beta_out::call(window_length, periodic, beta, out_meta); |
4887 | } |
4888 | |
4889 | at::Tensor out_; |
4890 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4891 | at::functionalization::impl::sync(out); |
4892 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4893 | } else { |
4894 | out_ = out; |
4895 | } |
4896 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4897 | if ((false)) { |
4898 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
4899 | TORCH_INTERNAL_ASSERT(false, |
4900 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4901 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4902 | } else { |
4903 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4904 | at::AutoDispatchSkipFunctionalize guard; |
4905 | at::Tensor tmp_output = at::_ops::kaiser_window_beta_out::call(window_length, periodic, beta, out_); |
4906 | return out;
4907 | } |
4908 | } else { |
4909 | at::Tensor tmp_output; |
4910 | { |
4911 | at::AutoDispatchSkipFunctionalize guard; |
4912 | tmp_output = at::_ops::kaiser_window_beta::call(window_length, periodic, beta, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
4913 | } |
4914 | at::functionalization::impl::replace_(out, tmp_output); |
4915 | at::functionalization::impl::commit_update(out); |
4916 | at::functionalization::impl::sync(out); |
4917 | return out; |
4918 | } |
4919 | } |
4920 | |
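// _index_put_impl takes a c10::List<c10::optional<at::Tensor>> of indices; the
// whole list is synced and unwrapped as a single argument through
// from_functional_tensor, exactly like a plain Tensor argument.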
4921 | at::Tensor & _index_put_impl_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe, at::Tensor & out) { |
4922 | if (false) { |
4923 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4924 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
4925 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
4926 | auto self_meta = to_meta(self); |
4927 | auto indices_meta = to_meta(indices); |
4928 | auto values_meta = to_meta(values); |
4929 | auto out_meta = to_meta(out); |
4930 | at::AutoDispatchSkipFunctionalize func_guard; |
4931 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4932 | at::_ops::_index_put_impl_out::call(self_meta, indices_meta, values_meta, accumulate, unsafe, out_meta); |
4933 | } |
4934 | |
4935 | at::Tensor self_; |
4936 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4937 | at::functionalization::impl::sync(self); |
4938 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4939 | } else { |
4940 | self_ = self; |
4941 | } |
4942 | |
4943 | c10::List<c10::optional<at::Tensor>> indices_; |
4944 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
4945 | at::functionalization::impl::sync(indices); |
4946 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
4947 | } else { |
4948 | indices_ = indices; |
4949 | } |
4950 | |
4951 | at::Tensor values_; |
4952 | if (at::functionalization::impl::isFunctionalTensor(values)) { |
4953 | at::functionalization::impl::sync(values); |
4954 | values_ = at::functionalization::impl::from_functional_tensor(values); |
4955 | } else { |
4956 | values_ = values; |
4957 | } |
4958 | |
4959 | at::Tensor out_; |
4960 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4961 | at::functionalization::impl::sync(out); |
4962 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4963 | } else { |
4964 | out_ = out; |
4965 | } |
4966 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4967 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(indices) || at::functionalization::impl::isFunctionalTensor(values))) { |
4968 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
4969 | TORCH_INTERNAL_ASSERT(false, |
4970 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4971 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4972 | } else { |
4973 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4974 | at::AutoDispatchSkipFunctionalize guard; |
4975 | at::Tensor tmp_output = at::_ops::_index_put_impl_out::call(self_, indices_, values_, accumulate, unsafe, out_); |
4976 | return out;
4977 | } |
4978 | } else { |
4979 | at::Tensor tmp_output; |
4980 | { |
4981 | at::AutoDispatchSkipFunctionalize guard; |
4982 | tmp_output = at::_ops::_index_put_impl::call(self_, indices_, values_, accumulate, unsafe); |
4983 | } |
4984 | at::functionalization::impl::replace_(out, tmp_output); |
4985 | at::functionalization::impl::commit_update(out); |
4986 | at::functionalization::impl::sync(out); |
4987 | return out; |
4988 | } |
4989 | } |
4990 | |
4991 | at::Tensor & _index_put_impl_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) { |
4992 | if (true) { |
4993 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4994 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
4995 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
4996 | auto self_meta = to_meta(self); |
4997 | auto indices_meta = to_meta(indices); |
4998 | auto values_meta = to_meta(values); |
4999 | at::AutoDispatchSkipFunctionalize func_guard; |
5000 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5001 | at::_ops::_index_put_impl_::call(self_meta, indices_meta, values_meta, accumulate, unsafe); |
5002 | } |
5003 | |
5004 | at::Tensor self_; |
5005 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5006 | at::functionalization::impl::sync(self); |
5007 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5008 | } else { |
5009 | self_ = self; |
5010 | } |
5011 | |
5012 | c10::List<c10::optional<at::Tensor>> indices_; |
5013 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
5014 | at::functionalization::impl::sync(indices); |
5015 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
5016 | } else { |
5017 | indices_ = indices; |
5018 | } |
5019 | |
5020 | at::Tensor values_; |
5021 | if (at::functionalization::impl::isFunctionalTensor(values)) { |
5022 | at::functionalization::impl::sync(values); |
5023 | values_ = at::functionalization::impl::from_functional_tensor(values); |
5024 | } else { |
5025 | values_ = values; |
5026 | } |
5027 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
5028 | if ((false || at::functionalization::impl::isFunctionalTensor(indices) || at::functionalization::impl::isFunctionalTensor(values))) { |
5029 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
5030 | TORCH_INTERNAL_ASSERT(false, |
5031 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
5032 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
5033 | } else { |
5034 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5035 | at::AutoDispatchSkipFunctionalize guard; |
5036 | at::Tensor tmp_output = at::_ops::_index_put_impl_::call(self_, indices_, values_, accumulate, unsafe); |
5037 | return self;
5038 | } |
5039 | } else { |
5040 | at::Tensor tmp_output; |
5041 | { |
5042 | at::AutoDispatchSkipFunctionalize guard; |
5043 | tmp_output = at::_ops::_index_put_impl::call(self_, indices_, values_, accumulate, unsafe); |
5044 | } |
5045 | at::functionalization::impl::replace_(self, tmp_output); |
5046 | at::functionalization::impl::commit_update(self); |
5047 | at::functionalization::impl::sync(self); |
5048 | return self; |
5049 | } |
5050 | } |
5051 | |
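// Multi-output out= ops (kthvalue, native_layer_norm, mkldnn_linear_backward,
// ...) return a tuple of references to the user-supplied outputs; each output
// is replaced, committed and synced individually from the corresponding element
// of the functional op's result tuple.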
5052 | ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out_values(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) { |
5053 | if (false) { |
5054 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5055 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
5056 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
5057 | auto self_meta = to_meta(self); |
5058 | auto values_meta = to_meta(values); |
5059 | auto indices_meta = to_meta(indices); |
5060 | at::AutoDispatchSkipFunctionalize func_guard; |
5061 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5062 | at::_ops::kthvalue_values::call(self_meta, k, dim, keepdim, values_meta, indices_meta); |
5063 | } |
5064 | |
5065 | at::Tensor self_; |
5066 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5067 | at::functionalization::impl::sync(self); |
5068 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5069 | } else { |
5070 | self_ = self; |
5071 | } |
5072 | |
5073 | at::Tensor values_; |
5074 | if (at::functionalization::impl::isFunctionalTensor(values)) { |
5075 | at::functionalization::impl::sync(values); |
5076 | values_ = at::functionalization::impl::from_functional_tensor(values); |
5077 | } else { |
5078 | values_ = values; |
5079 | } |
5080 | |
5081 | at::Tensor indices_; |
5082 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
5083 | at::functionalization::impl::sync(indices); |
5084 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
5085 | } else { |
5086 | indices_ = indices; |
5087 | } |
5088 | if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) { |
5089 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
5090 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
5091 | TORCH_INTERNAL_ASSERT(false, |
5092 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
5093 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
5094 | } else { |
5095 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5096 | at::AutoDispatchSkipFunctionalize guard; |
5097 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::kthvalue_values::call(self_, k, dim, keepdim, values_, indices_); |
5098 | return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
5099 | } |
5100 | } else { |
5101 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
5102 | { |
5103 | at::AutoDispatchSkipFunctionalize guard; |
5104 | tmp_output = at::_ops::kthvalue::call(self_, k, dim, keepdim); |
5105 | } |
5106 | at::functionalization::impl::replace_(values, std::get<0>(tmp_output)); |
5107 | at::functionalization::impl::commit_update(values); |
5108 | at::functionalization::impl::sync(values); |
5109 | at::functionalization::impl::replace_(indices, std::get<1>(tmp_output)); |
5110 | at::functionalization::impl::commit_update(indices); |
5111 | at::functionalization::impl::sync(indices); |
5112 | return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices); |
5113 | } |
5114 | } |
5115 | |
5116 | ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out_dimname_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) { |
5117 | if (false) { |
5118 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5119 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
5120 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
5121 | auto self_meta = to_meta(self); |
5122 | auto values_meta = to_meta(values); |
5123 | auto indices_meta = to_meta(indices); |
5124 | at::AutoDispatchSkipFunctionalize func_guard; |
5125 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5126 | at::_ops::kthvalue_dimname_out::call(self_meta, k, dim, keepdim, values_meta, indices_meta); |
5127 | } |
5128 | |
5129 | at::Tensor self_; |
5130 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5131 | at::functionalization::impl::sync(self); |
5132 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5133 | } else { |
5134 | self_ = self; |
5135 | } |
5136 | |
5137 | at::Tensor values_; |
5138 | if (at::functionalization::impl::isFunctionalTensor(values)) { |
5139 | at::functionalization::impl::sync(values); |
5140 | values_ = at::functionalization::impl::from_functional_tensor(values); |
5141 | } else { |
5142 | values_ = values; |
5143 | } |
5144 | |
5145 | at::Tensor indices_; |
5146 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
5147 | at::functionalization::impl::sync(indices); |
5148 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
5149 | } else { |
5150 | indices_ = indices; |
5151 | } |
5152 | if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) { |
5153 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
5154 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
5155 | TORCH_INTERNAL_ASSERT(false, |
5156 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
5157 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
5158 | } else { |
5159 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5160 | at::AutoDispatchSkipFunctionalize guard; |
5161 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::kthvalue_dimname_out::call(self_, k, dim, keepdim, values_, indices_); |
5162 | return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
5163 | } |
5164 | } else { |
5165 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
5166 | { |
5167 | at::AutoDispatchSkipFunctionalize guard; |
5168 | tmp_output = at::_ops::kthvalue_dimname::call(self_, k, dim, keepdim); |
5169 | } |
5170 | at::functionalization::impl::replace_(values, std::get<0>(tmp_output)); |
5171 | at::functionalization::impl::commit_update(values); |
5172 | at::functionalization::impl::sync(values); |
5173 | at::functionalization::impl::replace_(indices, std::get<1>(tmp_output)); |
5174 | at::functionalization::impl::commit_update(indices); |
5175 | at::functionalization::impl::sync(indices); |
5176 | return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices); |
5177 | } |
5178 | } |
5179 | |
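// Optional tensor arguments (weight, bias) are unwrapped into
// c10::optional<at::Tensor> locals using the same isFunctionalTensor / sync /
// from_functional_tensor sequence as required tensor arguments.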
5180 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
5181 | if (false) { |
5182 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5183 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
5184 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
5185 | auto input_meta = to_meta(input); |
5186 | auto weight_meta = to_meta(weight); |
5187 | auto bias_meta = to_meta(bias); |
5188 | auto out0_meta = to_meta(out0); |
5189 | auto out1_meta = to_meta(out1); |
5190 | auto out2_meta = to_meta(out2); |
5191 | at::AutoDispatchSkipFunctionalize func_guard; |
5192 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5193 | at::_ops::native_layer_norm_out::call(input_meta, normalized_shape, weight_meta, bias_meta, eps, out0_meta, out1_meta, out2_meta); |
5194 | } |
5195 | |
5196 | at::Tensor input_; |
5197 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
5198 | at::functionalization::impl::sync(input); |
5199 | input_ = at::functionalization::impl::from_functional_tensor(input); |
5200 | } else { |
5201 | input_ = input; |
5202 | } |
5203 | |
5204 | c10::optional<at::Tensor> weight_; |
5205 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
5206 | at::functionalization::impl::sync(weight); |
5207 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
5208 | } else { |
5209 | weight_ = weight; |
5210 | } |
5211 | |
5212 | c10::optional<at::Tensor> bias_; |
5213 | if (at::functionalization::impl::isFunctionalTensor(bias)) { |
5214 | at::functionalization::impl::sync(bias); |
5215 | bias_ = at::functionalization::impl::from_functional_tensor(bias); |
5216 | } else { |
5217 | bias_ = bias; |
5218 | } |
5219 | |
5220 | at::Tensor out0_; |
5221 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
5222 | at::functionalization::impl::sync(out0); |
5223 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
5224 | } else { |
5225 | out0_ = out0; |
5226 | } |
5227 | |
5228 | at::Tensor out1_; |
5229 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
5230 | at::functionalization::impl::sync(out1); |
5231 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
5232 | } else { |
5233 | out1_ = out1; |
5234 | } |
5235 | |
5236 | at::Tensor out2_; |
5237 | if (at::functionalization::impl::isFunctionalTensor(out2)) { |
5238 | at::functionalization::impl::sync(out2); |
5239 | out2_ = at::functionalization::impl::from_functional_tensor(out2); |
5240 | } else { |
5241 | out2_ = out2; |
5242 | } |
5243 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) { |
5244 | if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) { |
5245 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
5246 | TORCH_INTERNAL_ASSERT(false, |
5247 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
5248 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
5249 | } else { |
5250 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5251 | at::AutoDispatchSkipFunctionalize guard; |
5252 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::native_layer_norm_out::call(input_, normalized_shape, weight_, bias_, eps, out0_, out1_, out2_); |
5253 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
5254 | } |
5255 | } else { |
5256 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output; |
5257 | { |
5258 | at::AutoDispatchSkipFunctionalize guard; |
5259 | tmp_output = at::_ops::native_layer_norm::call(input_, normalized_shape, weight_, bias_, eps); |
5260 | } |
5261 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
5262 | at::functionalization::impl::commit_update(out0); |
5263 | at::functionalization::impl::sync(out0); |
5264 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
5265 | at::functionalization::impl::commit_update(out1); |
5266 | at::functionalization::impl::sync(out1); |
5267 | at::functionalization::impl::replace_(out2, std::get<2>(tmp_output)); |
5268 | at::functionalization::impl::commit_update(out2); |
5269 | at::functionalization::impl::sync(out2); |
5270 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
5271 | } |
5272 | } |
5273 | |
5274 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
5275 | if (false) { |
5276 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5277 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
5278 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
5279 | auto grad_out_meta = to_meta(grad_out); |
5280 | auto input_meta = to_meta(input); |
5281 | auto mean_meta = to_meta(mean); |
5282 | auto rstd_meta = to_meta(rstd); |
5283 | auto weight_meta = to_meta(weight); |
5284 | auto bias_meta = to_meta(bias); |
5285 | auto out0_meta = to_meta(out0); |
5286 | auto out1_meta = to_meta(out1); |
5287 | auto out2_meta = to_meta(out2); |
5288 | at::AutoDispatchSkipFunctionalize func_guard; |
5289 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5290 | at::_ops::native_layer_norm_backward_out::call(grad_out_meta, input_meta, normalized_shape, mean_meta, rstd_meta, weight_meta, bias_meta, output_mask, out0_meta, out1_meta, out2_meta); |
5291 | } |
5292 | |
5293 | at::Tensor grad_out_; |
5294 | if (at::functionalization::impl::isFunctionalTensor(grad_out)) { |
5295 | at::functionalization::impl::sync(grad_out); |
5296 | grad_out_ = at::functionalization::impl::from_functional_tensor(grad_out); |
5297 | } else { |
5298 | grad_out_ = grad_out; |
5299 | } |
5300 | |
5301 | at::Tensor input_; |
5302 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
5303 | at::functionalization::impl::sync(input); |
5304 | input_ = at::functionalization::impl::from_functional_tensor(input); |
5305 | } else { |
5306 | input_ = input; |
5307 | } |
5308 | |
5309 | at::Tensor mean_; |
5310 | if (at::functionalization::impl::isFunctionalTensor(mean)) { |
5311 | at::functionalization::impl::sync(mean); |
5312 | mean_ = at::functionalization::impl::from_functional_tensor(mean); |
5313 | } else { |
5314 | mean_ = mean; |
5315 | } |
5316 | |
5317 | at::Tensor rstd_; |
5318 | if (at::functionalization::impl::isFunctionalTensor(rstd)) { |
5319 | at::functionalization::impl::sync(rstd); |
5320 | rstd_ = at::functionalization::impl::from_functional_tensor(rstd); |
5321 | } else { |
5322 | rstd_ = rstd; |
5323 | } |
5324 | |
5325 | c10::optional<at::Tensor> weight_; |
5326 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
5327 | at::functionalization::impl::sync(weight); |
5328 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
5329 | } else { |
5330 | weight_ = weight; |
5331 | } |
5332 | |
5333 | c10::optional<at::Tensor> bias_; |
5334 | if (at::functionalization::impl::isFunctionalTensor(bias)) { |
5335 | at::functionalization::impl::sync(bias); |
5336 | bias_ = at::functionalization::impl::from_functional_tensor(bias); |
5337 | } else { |
5338 | bias_ = bias; |
5339 | } |
5340 | |
5341 | at::Tensor out0_; |
5342 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
5343 | at::functionalization::impl::sync(out0); |
5344 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
5345 | } else { |
5346 | out0_ = out0; |
5347 | } |
5348 | |
5349 | at::Tensor out1_; |
5350 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
5351 | at::functionalization::impl::sync(out1); |
5352 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
5353 | } else { |
5354 | out1_ = out1; |
5355 | } |
5356 | |
5357 | at::Tensor out2_; |
5358 | if (at::functionalization::impl::isFunctionalTensor(out2)) { |
5359 | at::functionalization::impl::sync(out2); |
5360 | out2_ = at::functionalization::impl::from_functional_tensor(out2); |
5361 | } else { |
5362 | out2_ = out2; |
5363 | } |
5364 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) { |
5365 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_out) || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(mean) || at::functionalization::impl::isFunctionalTensor(rstd) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) { |
5366 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
5367 | TORCH_INTERNAL_ASSERT(false, |
5368 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
5369 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
5370 | } else { |
5371 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5372 | at::AutoDispatchSkipFunctionalize guard; |
5373 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::native_layer_norm_backward_out::call(grad_out_, input_, normalized_shape, mean_, rstd_, weight_, bias_, output_mask, out0_, out1_, out2_); |
5374 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
5375 | } |
5376 | } else { |
5377 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output; |
5378 | { |
5379 | at::AutoDispatchSkipFunctionalize guard; |
5380 | tmp_output = at::_ops::native_layer_norm_backward::call(grad_out_, input_, normalized_shape, mean_, rstd_, weight_, bias_, output_mask); |
5381 | } |
5382 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
5383 | at::functionalization::impl::commit_update(out0); |
5384 | at::functionalization::impl::sync(out0); |
5385 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
5386 | at::functionalization::impl::commit_update(out1); |
5387 | at::functionalization::impl::sync(out1); |
5388 | at::functionalization::impl::replace_(out2, std::get<2>(tmp_output)); |
5389 | at::functionalization::impl::commit_update(out2); |
5390 | at::functionalization::impl::sync(out2); |
5391 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
5392 | } |
5393 | } |
5394 | |
5395 | at::Tensor & mkldnn_linear_backward_input_out_out(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight, at::Tensor & out) { |
5396 | if (false) { |
5397 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5398 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
5399 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
5400 | auto grad_output_meta = to_meta(grad_output); |
5401 | auto weight_meta = to_meta(weight); |
5402 | auto out_meta = to_meta(out); |
5403 | at::AutoDispatchSkipFunctionalize func_guard; |
5404 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5405 | at::_ops::mkldnn_linear_backward_input_out::call(input_size, grad_output_meta, weight_meta, out_meta); |
5406 | } |
5407 | |
5408 | at::Tensor grad_output_; |
5409 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
5410 | at::functionalization::impl::sync(grad_output); |
5411 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
5412 | } else { |
5413 | grad_output_ = grad_output; |
5414 | } |
5415 | |
5416 | at::Tensor weight_; |
5417 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
5418 | at::functionalization::impl::sync(weight); |
5419 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
5420 | } else { |
5421 | weight_ = weight; |
5422 | } |
5423 | |
5424 | at::Tensor out_; |
5425 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
5426 | at::functionalization::impl::sync(out); |
5427 | out_ = at::functionalization::impl::from_functional_tensor(out); |
5428 | } else { |
5429 | out_ = out; |
5430 | } |
5431 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
5432 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(weight))) { |
5433 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
5434 | TORCH_INTERNAL_ASSERT(false, |
5435 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
5436 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
5437 | } else { |
5438 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5439 | at::AutoDispatchSkipFunctionalize guard; |
5440 | at::Tensor tmp_output = at::_ops::mkldnn_linear_backward_input_out::call(input_size, grad_output_, weight_, out_); |
5441 | return out;
5442 | } |
5443 | } else { |
5444 | at::Tensor tmp_output; |
5445 | { |
5446 | at::AutoDispatchSkipFunctionalize guard; |
5447 | tmp_output = at::_ops::mkldnn_linear_backward_input::call(input_size, grad_output_, weight_); |
5448 | } |
5449 | at::functionalization::impl::replace_(out, tmp_output); |
5450 | at::functionalization::impl::commit_update(out); |
5451 | at::functionalization::impl::sync(out); |
5452 | return out; |
5453 | } |
5454 | } |
5455 | |
5456 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_linear_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
5457 | if (false) { |
5458 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5459 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
5460 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
5461 | auto self_meta = to_meta(self); |
5462 | auto grad_output_meta = to_meta(grad_output); |
5463 | auto weight_meta = to_meta(weight); |
5464 | auto out0_meta = to_meta(out0); |
5465 | auto out1_meta = to_meta(out1); |
5466 | auto out2_meta = to_meta(out2); |
5467 | at::AutoDispatchSkipFunctionalize func_guard; |
5468 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5469 | at::_ops::mkldnn_linear_backward_out::call(self_meta, grad_output_meta, weight_meta, output_mask, out0_meta, out1_meta, out2_meta); |
5470 | } |
5471 | |
5472 | at::Tensor self_; |
5473 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5474 | at::functionalization::impl::sync(self); |
5475 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5476 | } else { |
5477 | self_ = self; |
5478 | } |
5479 | |
5480 | at::Tensor grad_output_; |
5481 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
5482 | at::functionalization::impl::sync(grad_output); |
5483 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
5484 | } else { |
5485 | grad_output_ = grad_output; |
5486 | } |
5487 | |
5488 | at::Tensor weight_; |
5489 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
5490 | at::functionalization::impl::sync(weight); |
5491 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
5492 | } else { |
5493 | weight_ = weight; |
5494 | } |
5495 | |
5496 | at::Tensor out0_; |
5497 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
5498 | at::functionalization::impl::sync(out0); |
5499 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
5500 | } else { |
5501 | out0_ = out0; |
5502 | } |
5503 | |
5504 | at::Tensor out1_; |
5505 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
5506 | at::functionalization::impl::sync(out1); |
5507 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
5508 | } else { |
5509 | out1_ = out1; |
5510 | } |
5511 | |
5512 | at::Tensor out2_; |
5513 | if (at::functionalization::impl::isFunctionalTensor(out2)) { |
5514 | at::functionalization::impl::sync(out2); |
5515 | out2_ = at::functionalization::impl::from_functional_tensor(out2); |
5516 | } else { |
5517 | out2_ = out2; |
5518 | } |
5519 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) { |
5520 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(weight))) { |
5521 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
5522 | TORCH_INTERNAL_ASSERT(false, |
5523 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
5524 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
5525 | } else { |
5526 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5527 | at::AutoDispatchSkipFunctionalize guard; |
5528 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::mkldnn_linear_backward_out::call(self_, grad_output_, weight_, output_mask, out0_, out1_, out2_); |
5529 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
5530 | } |
5531 | } else { |
5532 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output; |
5533 | { |
5534 | at::AutoDispatchSkipFunctionalize guard; |
5535 | tmp_output = at::_ops::mkldnn_linear_backward::call(self_, grad_output_, weight_, output_mask); |
5536 | } |
5537 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
5538 | at::functionalization::impl::commit_update(out0); |
5539 | at::functionalization::impl::sync(out0); |
5540 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
5541 | at::functionalization::impl::commit_update(out1); |
5542 | at::functionalization::impl::sync(out1); |
5543 | at::functionalization::impl::replace_(out2, std::get<2>(tmp_output)); |
5544 | at::functionalization::impl::commit_update(out2); |
5545 | at::functionalization::impl::sync(out2); |
5546 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
5547 | } |
5548 | } |
5549 | |
5550 | at::Tensor & log10_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
5551 | if (false) { |
5552 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5553 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
5554 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
5555 | auto self_meta = to_meta(self); |
5556 | auto out_meta = to_meta(out); |
5557 | at::AutoDispatchSkipFunctionalize func_guard; |
5558 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5559 | at::_ops::log10_out::call(self_meta, out_meta); |
5560 | } |
5561 | |
5562 | at::Tensor self_; |
5563 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5564 | at::functionalization::impl::sync(self); |
5565 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5566 | } else { |
5567 | self_ = self; |
5568 | } |
5569 | |
5570 | at::Tensor out_; |
5571 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
5572 | at::functionalization::impl::sync(out); |
5573 | out_ = at::functionalization::impl::from_functional_tensor(out); |
5574 | } else { |
5575 | out_ = out; |
5576 | } |
5577 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
5578 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
5579 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
5580 | TORCH_INTERNAL_ASSERT(false, |
5581 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
5582 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
5583 | } else { |
5584 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5585 | at::AutoDispatchSkipFunctionalize guard; |
5586 | at::Tensor tmp_output = at::_ops::log10_out::call(self_, out_); |
5587 | return out;
5588 | } |
5589 | } else { |
5590 | at::Tensor tmp_output; |
5591 | { |
5592 | at::AutoDispatchSkipFunctionalize guard; |
5593 | tmp_output = at::_ops::log10::call(self_); |
5594 | } |
5595 | at::functionalization::impl::replace_(out, tmp_output); |
5596 | at::functionalization::impl::commit_update(out); |
5597 | at::functionalization::impl::sync(out); |
5598 | return out; |
5599 | } |
5600 | } |
5601 | |
5602 | at::Tensor & log10_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
5603 | if (true) { |
5604 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5605 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
5606 | // (We can only do this for inplace ops today, though, because they technically all support meta tensors). |
5607 | auto self_meta = to_meta(self); |
5608 | at::AutoDispatchSkipFunctionalize func_guard; |
5609 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5610 | at::_ops::log10_::call(self_meta); |
5611 | } |
5612 | |
5613 | at::Tensor self_; |
5614 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5615 | at::functionalization::impl::sync(self); |
5616 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5617 | } else { |
5618 | self_ = self; |
5619 | } |
5620 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
5621 | if ((false)) { |
5622 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
5623 | TORCH_INTERNAL_ASSERT(false, |
5624 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
5625 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
5626 | } else { |
5627 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5628 | at::AutoDispatchSkipFunctionalize guard; |
5629 | at::Tensor tmp_output = at::_ops::log10_::call(self_); |
5630 | return self; |
5631 | } |
5632 | } else { |
5633 | at::Tensor tmp_output; |
5634 | { |
5635 | at::AutoDispatchSkipFunctionalize guard; |
5636 | tmp_output = at::_ops::log10::call(self_); |
5637 | } |
5638 | at::functionalization::impl::replace_(self, tmp_output); |
5639 | at::functionalization::impl::commit_update(self); |
5640 | at::functionalization::impl::sync(self); |
5641 | return self; |
5642 | } |
5643 | } |
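| |
| // NOTE (editorial sketch): in-place kernels such as log10_ above differ from the |
| // out= kernels in two ways. First, the meta-tensor shape check is enabled (the |
| // `if (true)` block), since only in-place ops are guaranteed to support meta |
| // tensors today. Second, the functional result is committed back into `self` |
| // rather than into a separate `out` tensor. Assuming a hypothetical functional |
| // tensor x, the effect is roughly: |
| // |
| //   x.log10_();   // rewritten to: |
| //                 //   at::Tensor tmp = at::log10(x); |
| //                 //   <x's value is replaced with tmp> |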
5644 | |
5645 | at::Tensor & log1p_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
5646 | if (false) { |
5647 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5648 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
5649 | // (We can only do this for inplace ops today, though, because they technically all support meta tensors). |
5650 | auto self_meta = to_meta(self); |
5651 | auto out_meta = to_meta(out); |
5652 | at::AutoDispatchSkipFunctionalize func_guard; |
5653 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5654 | at::_ops::log1p_out::call(self_meta, out_meta); |
5655 | } |
5656 | |
5657 | at::Tensor self_; |
5658 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5659 | at::functionalization::impl::sync(self); |
5660 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5661 | } else { |
5662 | self_ = self; |
5663 | } |
5664 | |
5665 | at::Tensor out_; |
5666 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
5667 | at::functionalization::impl::sync(out); |
5668 | out_ = at::functionalization::impl::from_functional_tensor(out); |
5669 | } else { |
5670 | out_ = out; |
5671 | } |
5672 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
5673 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
5674 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
5675 | TORCH_INTERNAL_ASSERT(false, |
5676 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
5677 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
5678 | } else { |
5679 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5680 | at::AutoDispatchSkipFunctionalize guard; |
5681 | at::Tensor tmp_output = at::_ops::log1p_out::call(self_, out_); |
5682 | return out; |
5683 | } |
5684 | } else { |
5685 | at::Tensor tmp_output; |
5686 | { |
5687 | at::AutoDispatchSkipFunctionalize guard; |
5688 | tmp_output = at::_ops::log1p::call(self_); |
5689 | } |
5690 | at::functionalization::impl::replace_(out, tmp_output); |
5691 | at::functionalization::impl::commit_update(out); |
5692 | at::functionalization::impl::sync(out); |
5693 | return out; |
5694 | } |
5695 | } |
5696 | |
5697 | at::Tensor & log1p_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
5698 | if (true) { |
5699 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5700 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
5701 | // (We can only do this for inplace ops today, though, because they technically all support meta tensors). |
5702 | auto self_meta = to_meta(self); |
5703 | at::AutoDispatchSkipFunctionalize func_guard; |
5704 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5705 | at::_ops::log1p_::call(self_meta); |
5706 | } |
5707 | |
5708 | at::Tensor self_; |
5709 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5710 | at::functionalization::impl::sync(self); |
5711 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5712 | } else { |
5713 | self_ = self; |
5714 | } |
5715 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
5716 | if ((false)) { |
5717 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
5718 | TORCH_INTERNAL_ASSERT(false, |
5719 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
5720 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
5721 | } else { |
5722 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5723 | at::AutoDispatchSkipFunctionalize guard; |
5724 | at::Tensor tmp_output = at::_ops::log1p_::call(self_); |
5725 | return self; |
5726 | } |
5727 | } else { |
5728 | at::Tensor tmp_output; |
5729 | { |
5730 | at::AutoDispatchSkipFunctionalize guard; |
5731 | tmp_output = at::_ops::log1p::call(self_); |
5732 | } |
5733 | at::functionalization::impl::replace_(self, tmp_output); |
5734 | at::functionalization::impl::commit_update(self); |
5735 | at::functionalization::impl::sync(self); |
5736 | return self; |
5737 | } |
5738 | } |
5739 | |
5740 | at::Tensor & logsumexp_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) { |
5741 | if (false) { |
5742 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5743 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
5744 | // (We can only do this for inplace ops today, though, because they technically all support meta tensors). |
5745 | auto self_meta = to_meta(self); |
5746 | auto out_meta = to_meta(out); |
5747 | at::AutoDispatchSkipFunctionalize func_guard; |
5748 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5749 | at::_ops::logsumexp_out::call(self_meta, dim, keepdim, out_meta); |
5750 | } |
5751 | |
5752 | at::Tensor self_; |
5753 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5754 | at::functionalization::impl::sync(self); |
5755 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5756 | } else { |
5757 | self_ = self; |
5758 | } |
5759 | |
5760 | at::Tensor out_; |
5761 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
5762 | at::functionalization::impl::sync(out); |
5763 | out_ = at::functionalization::impl::from_functional_tensor(out); |
5764 | } else { |
5765 | out_ = out; |
5766 | } |
5767 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
5768 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
5769 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
5770 | TORCH_INTERNAL_ASSERT(false, |
5771 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
5772 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
5773 | } else { |
5774 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5775 | at::AutoDispatchSkipFunctionalize guard; |
5776 | at::Tensor tmp_output = at::_ops::logsumexp_out::call(self_, dim, keepdim, out_); |
5777 | return out; |
5778 | } |
5779 | } else { |
5780 | at::Tensor tmp_output; |
5781 | { |
5782 | at::AutoDispatchSkipFunctionalize guard; |
5783 | tmp_output = at::_ops::logsumexp::call(self_, dim, keepdim); |
5784 | } |
5785 | at::functionalization::impl::replace_(out, tmp_output); |
5786 | at::functionalization::impl::commit_update(out); |
5787 | at::functionalization::impl::sync(out); |
5788 | return out; |
5789 | } |
5790 | } |
5791 | |
5792 | at::Tensor & logsumexp_out_names_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim, at::Tensor & out) { |
5793 | if (false) { |
5794 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5795 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
5796 | // (We can only do this for inplace ops today, though, because they technically all support meta tensors). |
5797 | auto self_meta = to_meta(self); |
5798 | auto out_meta = to_meta(out); |
5799 | at::AutoDispatchSkipFunctionalize func_guard; |
5800 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5801 | at::_ops::logsumexp_names_out::call(self_meta, dim, keepdim, out_meta); |
5802 | } |
5803 | |
5804 | at::Tensor self_; |
5805 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5806 | at::functionalization::impl::sync(self); |
5807 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5808 | } else { |
5809 | self_ = self; |
5810 | } |
5811 | |
5812 | at::Tensor out_; |
5813 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
5814 | at::functionalization::impl::sync(out); |
5815 | out_ = at::functionalization::impl::from_functional_tensor(out); |
5816 | } else { |
5817 | out_ = out; |
5818 | } |
5819 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
5820 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
5821 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
5822 | TORCH_INTERNAL_ASSERT(false, |
5823 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
5824 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
5825 | } else { |
5826 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5827 | at::AutoDispatchSkipFunctionalize guard; |
5828 | at::Tensor tmp_output = at::_ops::logsumexp_names_out::call(self_, dim, keepdim, out_); |
5829 | return out; |
5830 | } |
5831 | } else { |
5832 | at::Tensor tmp_output; |
5833 | { |
5834 | at::AutoDispatchSkipFunctionalize guard; |
5835 | tmp_output = at::_ops::logsumexp_names::call(self_, dim, keepdim); |
5836 | } |
5837 | at::functionalization::impl::replace_(out, tmp_output); |
5838 | at::functionalization::impl::commit_update(out); |
5839 | at::functionalization::impl::sync(out); |
5840 | return out; |
5841 | } |
5842 | } |
5843 | |
5844 | at::Tensor & matmul_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
5845 | if (false) { |
5846 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5847 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
5848 | // (We can only do this for inplace ops today, though, because they technically all support meta tensors). |
5849 | auto self_meta = to_meta(self); |
5850 | auto other_meta = to_meta(other); |
5851 | auto out_meta = to_meta(out); |
5852 | at::AutoDispatchSkipFunctionalize func_guard; |
5853 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5854 | at::_ops::matmul_out::call(self_meta, other_meta, out_meta); |
5855 | } |
5856 | |
5857 | at::Tensor self_; |
5858 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5859 | at::functionalization::impl::sync(self); |
5860 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5861 | } else { |
5862 | self_ = self; |
5863 | } |
5864 | |
5865 | at::Tensor other_; |
5866 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
5867 | at::functionalization::impl::sync(other); |
5868 | other_ = at::functionalization::impl::from_functional_tensor(other); |
5869 | } else { |
5870 | other_ = other; |
5871 | } |
5872 | |
5873 | at::Tensor out_; |
5874 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
5875 | at::functionalization::impl::sync(out); |
5876 | out_ = at::functionalization::impl::from_functional_tensor(out); |
5877 | } else { |
5878 | out_ = out; |
5879 | } |
5880 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
5881 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
5882 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
5883 | TORCH_INTERNAL_ASSERT(false, |
5884 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
5885 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
5886 | } else { |
5887 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5888 | at::AutoDispatchSkipFunctionalize guard; |
5889 | at::Tensor tmp_output = at::_ops::matmul_out::call(self_, other_, out_); |
5890 | return out; |
5891 | } |
5892 | } else { |
5893 | at::Tensor tmp_output; |
5894 | { |
5895 | at::AutoDispatchSkipFunctionalize guard; |
5896 | tmp_output = at::_ops::matmul::call(self_, other_); |
5897 | } |
5898 | at::functionalization::impl::replace_(out, tmp_output); |
5899 | at::functionalization::impl::commit_update(out); |
5900 | at::functionalization::impl::sync(out); |
5901 | return out; |
5902 | } |
5903 | } |
5904 | |
5905 | at::Tensor & mkldnn_max_pool2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) { |
5906 | if (false) { |
5907 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5908 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
5909 | // (We can only do this for inplace ops today, though, because they technically all support meta tensors). |
5910 | auto self_meta = to_meta(self); |
5911 | auto out_meta = to_meta(out); |
5912 | at::AutoDispatchSkipFunctionalize func_guard; |
5913 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5914 | at::_ops::mkldnn_max_pool2d_out::call(self_meta, kernel_size, stride, padding, dilation, ceil_mode, out_meta); |
5915 | } |
5916 | |
5917 | at::Tensor self_; |
5918 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5919 | at::functionalization::impl::sync(self); |
5920 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5921 | } else { |
5922 | self_ = self; |
5923 | } |
5924 | |
5925 | at::Tensor out_; |
5926 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
5927 | at::functionalization::impl::sync(out); |
5928 | out_ = at::functionalization::impl::from_functional_tensor(out); |
5929 | } else { |
5930 | out_ = out; |
5931 | } |
5932 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
5933 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
5934 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
5935 | TORCH_INTERNAL_ASSERT(false, |
5936 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
5937 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
5938 | } else { |
5939 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5940 | at::AutoDispatchSkipFunctionalize guard; |
5941 | at::Tensor tmp_output = at::_ops::mkldnn_max_pool2d_out::call(self_, kernel_size, stride, padding, dilation, ceil_mode, out_); |
5942 | return out; |
5943 | } |
5944 | } else { |
5945 | at::Tensor tmp_output; |
5946 | { |
5947 | at::AutoDispatchSkipFunctionalize guard; |
5948 | tmp_output = at::_ops::mkldnn_max_pool2d::call(self_, kernel_size, stride, padding, dilation, ceil_mode); |
5949 | } |
5950 | at::functionalization::impl::replace_(out, tmp_output); |
5951 | at::functionalization::impl::commit_update(out); |
5952 | at::functionalization::impl::sync(out); |
5953 | return out; |
5954 | } |
5955 | } |
5956 | |
5957 | at::Tensor & quantized_max_pool1d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) { |
5958 | if (false) { |
5959 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5960 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
5961 | // (We can only do this for inplace ops today, though, because they technically all support meta tensors). |
5962 | auto self_meta = to_meta(self); |
5963 | auto out_meta = to_meta(out); |
5964 | at::AutoDispatchSkipFunctionalize func_guard; |
5965 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5966 | at::_ops::quantized_max_pool1d_out::call(self_meta, kernel_size, stride, padding, dilation, ceil_mode, out_meta); |
5967 | } |
5968 | |
5969 | at::Tensor self_; |
5970 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5971 | at::functionalization::impl::sync(self); |
5972 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5973 | } else { |
5974 | self_ = self; |
5975 | } |
5976 | |
5977 | at::Tensor out_; |
5978 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
5979 | at::functionalization::impl::sync(out); |
5980 | out_ = at::functionalization::impl::from_functional_tensor(out); |
5981 | } else { |
5982 | out_ = out; |
5983 | } |
5984 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
5985 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
5986 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
5987 | TORCH_INTERNAL_ASSERT(false, |
5988 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
5989 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
5990 | } else { |
5991 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5992 | at::AutoDispatchSkipFunctionalize guard; |
5993 | at::Tensor tmp_output = at::_ops::quantized_max_pool1d_out::call(self_, kernel_size, stride, padding, dilation, ceil_mode, out_); |
5994 | return out; |
5995 | } |
5996 | } else { |
5997 | at::Tensor tmp_output; |
5998 | { |
5999 | at::AutoDispatchSkipFunctionalize guard; |
6000 | tmp_output = at::_ops::quantized_max_pool1d::call(self_, kernel_size, stride, padding, dilation, ceil_mode); |
6001 | } |
6002 | at::functionalization::impl::replace_(out, tmp_output); |
6003 | at::functionalization::impl::commit_update(out); |
6004 | at::functionalization::impl::sync(out); |
6005 | return out; |
6006 | } |
6007 | } |
6008 | |
6009 | at::Tensor & nanmean_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) { |
6010 | if (false) { |
6011 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
6012 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
6013 | // (We can only do this for inplace ops today, though, because they technically all support meta tensors). |
6014 | auto self_meta = to_meta(self); |
6015 | auto out_meta = to_meta(out); |
6016 | at::AutoDispatchSkipFunctionalize func_guard; |
6017 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6018 | at::_ops::nanmean_out::call(self_meta, dim, keepdim, dtype, out_meta); |
6019 | } |
6020 | |
6021 | at::Tensor self_; |
6022 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
6023 | at::functionalization::impl::sync(self); |
6024 | self_ = at::functionalization::impl::from_functional_tensor(self); |
6025 | } else { |
6026 | self_ = self; |
6027 | } |
6028 | |
6029 | at::Tensor out_; |
6030 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
6031 | at::functionalization::impl::sync(out); |
6032 | out_ = at::functionalization::impl::from_functional_tensor(out); |
6033 | } else { |
6034 | out_ = out; |
6035 | } |
6036 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
6037 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
6038 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
6039 | TORCH_INTERNAL_ASSERT(false, |
6040 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
6041 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
6042 | } else { |
6043 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
6044 | at::AutoDispatchSkipFunctionalize guard; |
6045 | at::Tensor tmp_output = at::_ops::nanmean_out::call(self_, dim, keepdim, dtype, out_); |
6046 | return out; |
6047 | } |
6048 | } else { |
6049 | at::Tensor tmp_output; |
6050 | { |
6051 | at::AutoDispatchSkipFunctionalize guard; |
6052 | tmp_output = at::_ops::nanmean::call(self_, dim, keepdim, dtype); |
6053 | } |
6054 | at::functionalization::impl::replace_(out, tmp_output); |
6055 | at::functionalization::impl::commit_update(out); |
6056 | at::functionalization::impl::sync(out); |
6057 | return out; |
6058 | } |
6059 | } |
6060 | |
6061 | at::Tensor & _mps_convolution_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) { |
6062 | if (false) { |
6063 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
6064 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
6065 | // (We can only do this for inplace ops today, though, because they technically all support meta tensors). |
6066 | auto self_meta = to_meta(self); |
6067 | auto weight_meta = to_meta(weight); |
6068 | auto bias_meta = to_meta(bias); |
6069 | auto out_meta = to_meta(out); |
6070 | at::AutoDispatchSkipFunctionalize func_guard; |
6071 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6072 | at::_ops::_mps_convolution_out::call(self_meta, weight_meta, bias_meta, padding, stride, dilation, groups, out_meta); |
6073 | } |
6074 | |
6075 | at::Tensor self_; |
6076 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
6077 | at::functionalization::impl::sync(self); |
6078 | self_ = at::functionalization::impl::from_functional_tensor(self); |
6079 | } else { |
6080 | self_ = self; |
6081 | } |
6082 | |
6083 | at::Tensor weight_; |
6084 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
6085 | at::functionalization::impl::sync(weight); |
6086 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
6087 | } else { |
6088 | weight_ = weight; |
6089 | } |
6090 | |
6091 | c10::optional<at::Tensor> bias_; |
6092 | if (at::functionalization::impl::isFunctionalTensor(bias)) { |
6093 | at::functionalization::impl::sync(bias); |
6094 | bias_ = at::functionalization::impl::from_functional_tensor(bias); |
6095 | } else { |
6096 | bias_ = bias; |
6097 | } |
6098 | |
6099 | at::Tensor out_; |
6100 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
6101 | at::functionalization::impl::sync(out); |
6102 | out_ = at::functionalization::impl::from_functional_tensor(out); |
6103 | } else { |
6104 | out_ = out; |
6105 | } |
6106 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
6107 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) { |
6108 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
6109 | TORCH_INTERNAL_ASSERT(false, |
6110 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
6111 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
6112 | } else { |
6113 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
6114 | at::AutoDispatchSkipFunctionalize guard; |
6115 | at::Tensor tmp_output = at::_ops::_mps_convolution_out::call(self_, weight_, bias_, padding, stride, dilation, groups, out_); |
6116 | return out; |
6117 | } |
6118 | } else { |
6119 | at::Tensor tmp_output; |
6120 | { |
6121 | at::AutoDispatchSkipFunctionalize guard; |
6122 | tmp_output = at::_ops::_mps_convolution::call(self_, weight_, bias_, padding, stride, dilation, groups); |
6123 | } |
6124 | at::functionalization::impl::replace_(out, tmp_output); |
6125 | at::functionalization::impl::commit_update(out); |
6126 | at::functionalization::impl::sync(out); |
6127 | return out; |
6128 | } |
6129 | } |
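| |
| // NOTE (editorial sketch): kernels with optional tensor arguments, such as |
| // _mps_convolution_out_out above, unwrap a `c10::optional<at::Tensor>` (e.g. |
| // `bias`) with the same isFunctionalTensor/sync/from_functional_tensor steps as |
| // plain tensors; a nullopt value simply passes through unchanged. Non-tensor |
| // arguments (IntArrayRef, SymIntArrayRef, int64_t, bool, ...) never need |
| // unwrapping and are forwarded as-is. |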
6130 | |
6131 | at::Tensor & mkldnn_convolution_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) { |
6132 | if (false) { |
6133 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
6134 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
6135 | // (We can only do this for inplace ops today, though, because they technically all support meta tensors). |
6136 | auto self_meta = to_meta(self); |
6137 | auto weight_meta = to_meta(weight); |
6138 | auto bias_meta = to_meta(bias); |
6139 | auto out_meta = to_meta(out); |
6140 | at::AutoDispatchSkipFunctionalize func_guard; |
6141 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6142 | at::_ops::mkldnn_convolution_out::call(self_meta, weight_meta, bias_meta, padding, stride, dilation, groups, out_meta); |
6143 | } |
6144 | |
6145 | at::Tensor self_; |
6146 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
6147 | at::functionalization::impl::sync(self); |
6148 | self_ = at::functionalization::impl::from_functional_tensor(self); |
6149 | } else { |
6150 | self_ = self; |
6151 | } |
6152 | |
6153 | at::Tensor weight_; |
6154 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
6155 | at::functionalization::impl::sync(weight); |
6156 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
6157 | } else { |
6158 | weight_ = weight; |
6159 | } |
6160 | |
6161 | c10::optional<at::Tensor> bias_; |
6162 | if (at::functionalization::impl::isFunctionalTensor(bias)) { |
6163 | at::functionalization::impl::sync(bias); |
6164 | bias_ = at::functionalization::impl::from_functional_tensor(bias); |
6165 | } else { |
6166 | bias_ = bias; |
6167 | } |
6168 | |
6169 | at::Tensor out_; |
6170 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
6171 | at::functionalization::impl::sync(out); |
6172 | out_ = at::functionalization::impl::from_functional_tensor(out); |
6173 | } else { |
6174 | out_ = out; |
6175 | } |
6176 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
6177 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) { |
6178 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
6179 | TORCH_INTERNAL_ASSERT(false, |
6180 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
6181 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
6182 | } else { |
6183 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
6184 | at::AutoDispatchSkipFunctionalize guard; |
6185 | at::Tensor tmp_output = at::_ops::mkldnn_convolution_out::call(self_, weight_, bias_, padding, stride, dilation, groups, out_); |
6186 | return out; |
6187 | } |
6188 | } else { |
6189 | at::Tensor tmp_output; |
6190 | { |
6191 | at::AutoDispatchSkipFunctionalize guard; |
6192 | tmp_output = at::_ops::mkldnn_convolution::call(self_, weight_, bias_, padding, stride, dilation, groups); |
6193 | } |
6194 | at::functionalization::impl::replace_(out, tmp_output); |
6195 | at::functionalization::impl::commit_update(out); |
6196 | at::functionalization::impl::sync(out); |
6197 | return out; |
6198 | } |
6199 | } |
6200 | |
6201 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_rnn_layer_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) { |
6202 | if (false) { |
6203 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
6204 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
6205 | // (We can only do this for inplace ops today, though, because they technically all support meta tensors). |
6206 | auto input_meta = to_meta(input); |
6207 | auto weight0_meta = to_meta(weight0); |
6208 | auto weight1_meta = to_meta(weight1); |
6209 | auto weight2_meta = to_meta(weight2); |
6210 | auto weight3_meta = to_meta(weight3); |
6211 | auto hx__meta = to_meta(hx_); |
6212 | auto cx__meta = to_meta(cx_); |
6213 | auto out0_meta = to_meta(out0); |
6214 | auto out1_meta = to_meta(out1); |
6215 | auto out2_meta = to_meta(out2); |
6216 | auto out3_meta = to_meta(out3); |
6217 | at::AutoDispatchSkipFunctionalize func_guard; |
6218 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6219 | at::_ops::mkldnn_rnn_layer_out::call(input_meta, weight0_meta, weight1_meta, weight2_meta, weight3_meta, hx__meta, cx__meta, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train, out0_meta, out1_meta, out2_meta, out3_meta); |
6220 | } |
6221 | |
6222 | at::Tensor input_; |
6223 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
6224 | at::functionalization::impl::sync(input); |
6225 | input_ = at::functionalization::impl::from_functional_tensor(input); |
6226 | } else { |
6227 | input_ = input; |
6228 | } |
6229 | |
6230 | at::Tensor weight0_; |
6231 | if (at::functionalization::impl::isFunctionalTensor(weight0)) { |
6232 | at::functionalization::impl::sync(weight0); |
6233 | weight0_ = at::functionalization::impl::from_functional_tensor(weight0); |
6234 | } else { |
6235 | weight0_ = weight0; |
6236 | } |
6237 | |
6238 | at::Tensor weight1_; |
6239 | if (at::functionalization::impl::isFunctionalTensor(weight1)) { |
6240 | at::functionalization::impl::sync(weight1); |
6241 | weight1_ = at::functionalization::impl::from_functional_tensor(weight1); |
6242 | } else { |
6243 | weight1_ = weight1; |
6244 | } |
6245 | |
6246 | at::Tensor weight2_; |
6247 | if (at::functionalization::impl::isFunctionalTensor(weight2)) { |
6248 | at::functionalization::impl::sync(weight2); |
6249 | weight2_ = at::functionalization::impl::from_functional_tensor(weight2); |
6250 | } else { |
6251 | weight2_ = weight2; |
6252 | } |
6253 | |
6254 | at::Tensor weight3_; |
6255 | if (at::functionalization::impl::isFunctionalTensor(weight3)) { |
6256 | at::functionalization::impl::sync(weight3); |
6257 | weight3_ = at::functionalization::impl::from_functional_tensor(weight3); |
6258 | } else { |
6259 | weight3_ = weight3; |
6260 | } |
6261 | |
6262 | at::Tensor hx__; |
6263 | if (at::functionalization::impl::isFunctionalTensor(hx_)) { |
6264 | at::functionalization::impl::sync(hx_); |
6265 | hx__ = at::functionalization::impl::from_functional_tensor(hx_); |
6266 | } else { |
6267 | hx__ = hx_; |
6268 | } |
6269 | |
6270 | at::Tensor cx__; |
6271 | if (at::functionalization::impl::isFunctionalTensor(cx_)) { |
6272 | at::functionalization::impl::sync(cx_); |
6273 | cx__ = at::functionalization::impl::from_functional_tensor(cx_); |
6274 | } else { |
6275 | cx__ = cx_; |
6276 | } |
6277 | |
6278 | at::Tensor out0_; |
6279 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
6280 | at::functionalization::impl::sync(out0); |
6281 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
6282 | } else { |
6283 | out0_ = out0; |
6284 | } |
6285 | |
6286 | at::Tensor out1_; |
6287 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
6288 | at::functionalization::impl::sync(out1); |
6289 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
6290 | } else { |
6291 | out1_ = out1; |
6292 | } |
6293 | |
6294 | at::Tensor out2_; |
6295 | if (at::functionalization::impl::isFunctionalTensor(out2)) { |
6296 | at::functionalization::impl::sync(out2); |
6297 | out2_ = at::functionalization::impl::from_functional_tensor(out2); |
6298 | } else { |
6299 | out2_ = out2; |
6300 | } |
6301 | |
6302 | at::Tensor out3_; |
6303 | if (at::functionalization::impl::isFunctionalTensor(out3)) { |
6304 | at::functionalization::impl::sync(out3); |
6305 | out3_ = at::functionalization::impl::from_functional_tensor(out3); |
6306 | } else { |
6307 | out3_ = out3; |
6308 | } |
6309 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2) && at::functionalization::impl::isFunctionalTensor(out3))) { |
6310 | if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight0) || at::functionalization::impl::isFunctionalTensor(weight1) || at::functionalization::impl::isFunctionalTensor(weight2) || at::functionalization::impl::isFunctionalTensor(weight3) || at::functionalization::impl::isFunctionalTensor(hx_) || at::functionalization::impl::isFunctionalTensor(cx_))) { |
6311 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
6312 | TORCH_INTERNAL_ASSERT(false, |
6313 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
6314 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
6315 | } else { |
6316 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
6317 | at::AutoDispatchSkipFunctionalize guard; |
6318 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::mkldnn_rnn_layer_out::call(input_, weight0_, weight1_, weight2_, weight3_, hx__, cx__, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train, out0_, out1_, out2_, out3_); |
6319 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3); |
6320 | } |
6321 | } else { |
6322 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output; |
6323 | { |
6324 | at::AutoDispatchSkipFunctionalize guard; |
6325 | tmp_output = at::_ops::mkldnn_rnn_layer::call(input_, weight0_, weight1_, weight2_, weight3_, hx__, cx__, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train); |
6326 | } |
6327 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
6328 | at::functionalization::impl::commit_update(out0); |
6329 | at::functionalization::impl::sync(out0); |
6330 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
6331 | at::functionalization::impl::commit_update(out1); |
6332 | at::functionalization::impl::sync(out1); |
6333 | at::functionalization::impl::replace_(out2, std::get<2>(tmp_output)); |
6334 | at::functionalization::impl::commit_update(out2); |
6335 | at::functionalization::impl::sync(out2); |
6336 | at::functionalization::impl::replace_(out3, std::get<3>(tmp_output)); |
6337 | at::functionalization::impl::commit_update(out3); |
6338 | at::functionalization::impl::sync(out3); |
6339 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3); |
6340 | } |
6341 | } |
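| |
| // NOTE (editorial sketch): multi-output kernels such as mkldnn_rnn_layer_out_out |
| // above run the functional variant once and then commit each element of the |
| // returned tuple into the matching out argument, roughly: |
| // |
| //   auto tmp = at::_ops::mkldnn_rnn_layer::call(/* unwrapped args */); |
| //   at::functionalization::impl::replace_(out0, std::get<0>(tmp)); |
| //   at::functionalization::impl::commit_update(out0); |
| //   at::functionalization::impl::sync(out0); |
| //   // ... and likewise for out1, out2, out3. |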
6342 | |
6343 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
6344 | if (false) { |
6345 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
6346 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
6347 | // (We can only do this for inplace ops today, though, because they technically all support meta tensors). |
6348 | auto input_meta = to_meta(input); |
6349 | auto weight_meta = to_meta(weight); |
6350 | auto bias_meta = to_meta(bias); |
6351 | auto running_mean_meta = to_meta(running_mean); |
6352 | auto running_var_meta = to_meta(running_var); |
6353 | auto out0_meta = to_meta(out0); |
6354 | auto out1_meta = to_meta(out1); |
6355 | auto out2_meta = to_meta(out2); |
6356 | at::AutoDispatchSkipFunctionalize func_guard; |
6357 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6358 | at::_ops::miopen_batch_norm_out::call(input_meta, weight_meta, bias_meta, running_mean_meta, running_var_meta, training, exponential_average_factor, epsilon, out0_meta, out1_meta, out2_meta); |
6359 | } |
6360 | |
6361 | at::Tensor input_; |
6362 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
6363 | at::functionalization::impl::sync(input); |
6364 | input_ = at::functionalization::impl::from_functional_tensor(input); |
6365 | } else { |
6366 | input_ = input; |
6367 | } |
6368 | |
6369 | at::Tensor weight_; |
6370 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
6371 | at::functionalization::impl::sync(weight); |
6372 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
6373 | } else { |
6374 | weight_ = weight; |
6375 | } |
6376 | |
6377 | c10::optional<at::Tensor> bias_; |
6378 | if (at::functionalization::impl::isFunctionalTensor(bias)) { |
6379 | at::functionalization::impl::sync(bias); |
6380 | bias_ = at::functionalization::impl::from_functional_tensor(bias); |
6381 | } else { |
6382 | bias_ = bias; |
6383 | } |
6384 | |
6385 | c10::optional<at::Tensor> running_mean_; |
6386 | if (at::functionalization::impl::isFunctionalTensor(running_mean)) { |
6387 | at::functionalization::impl::sync(running_mean); |
6388 | running_mean_ = at::functionalization::impl::from_functional_tensor(running_mean); |
6389 | } else { |
6390 | running_mean_ = running_mean; |
6391 | } |
6392 | |
6393 | c10::optional<at::Tensor> running_var_; |
6394 | if (at::functionalization::impl::isFunctionalTensor(running_var)) { |
6395 | at::functionalization::impl::sync(running_var); |
6396 | running_var_ = at::functionalization::impl::from_functional_tensor(running_var); |
6397 | } else { |
6398 | running_var_ = running_var; |
6399 | } |
6400 | |
6401 | at::Tensor out0_; |
6402 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
6403 | at::functionalization::impl::sync(out0); |
6404 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
6405 | } else { |
6406 | out0_ = out0; |
6407 | } |
6408 | |
6409 | at::Tensor out1_; |
6410 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
6411 | at::functionalization::impl::sync(out1); |
6412 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
6413 | } else { |
6414 | out1_ = out1; |
6415 | } |
6416 | |
6417 | at::Tensor out2_; |
6418 | if (at::functionalization::impl::isFunctionalTensor(out2)) { |
6419 | at::functionalization::impl::sync(out2); |
6420 | out2_ = at::functionalization::impl::from_functional_tensor(out2); |
6421 | } else { |
6422 | out2_ = out2; |
6423 | } |
6424 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) { |
6425 | if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias) || at::functionalization::impl::isFunctionalTensor(running_mean) || at::functionalization::impl::isFunctionalTensor(running_var))) { |
6426 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
6427 | TORCH_INTERNAL_ASSERT(false, |
6428 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
6429 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
6430 | } else { |
6431 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
6432 | at::AutoDispatchSkipFunctionalize guard; |
6433 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::miopen_batch_norm_out::call(input_, weight_, bias_, running_mean_, running_var_, training, exponential_average_factor, epsilon, out0_, out1_, out2_); |
6434 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
6435 | } |
6436 | } else { |
6437 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output; |
6438 | { |
6439 | at::AutoDispatchSkipFunctionalize guard; |
6440 | tmp_output = at::_ops::miopen_batch_norm::call(input_, weight_, bias_, running_mean_, running_var_, training, exponential_average_factor, epsilon); |
6441 | } |
6442 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
6443 | at::functionalization::impl::commit_update(out0); |
6444 | at::functionalization::impl::sync(out0); |
6445 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
6446 | at::functionalization::impl::commit_update(out1); |
6447 | at::functionalization::impl::sync(out1); |
6448 | at::functionalization::impl::replace_(out2, std::get<2>(tmp_output)); |
6449 | at::functionalization::impl::commit_update(out2); |
6450 | at::functionalization::impl::sync(out2); |
6451 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
6452 | } |
6453 | } |
6454 | |
6455 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
6456 | if (false) { |
6457 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
6458 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
6459 | // (We can only do this for inplace ops today, though, because they technically all support meta tensors). |
6460 | auto input_meta = to_meta(input); |
6461 | auto grad_output_meta = to_meta(grad_output); |
6462 | auto weight_meta = to_meta(weight); |
6463 | auto running_mean_meta = to_meta(running_mean); |
6464 | auto running_var_meta = to_meta(running_var); |
6465 | auto save_mean_meta = to_meta(save_mean); |
6466 | auto save_var_meta = to_meta(save_var); |
6467 | auto out0_meta = to_meta(out0); |
6468 | auto out1_meta = to_meta(out1); |
6469 | auto out2_meta = to_meta(out2); |
6470 | at::AutoDispatchSkipFunctionalize func_guard; |
6471 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6472 | at::_ops::miopen_batch_norm_backward_out::call(input_meta, grad_output_meta, weight_meta, running_mean_meta, running_var_meta, save_mean_meta, save_var_meta, epsilon, out0_meta, out1_meta, out2_meta); |
6473 | } |
6474 | |
6475 | at::Tensor input_; |
6476 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
6477 | at::functionalization::impl::sync(input); |
6478 | input_ = at::functionalization::impl::from_functional_tensor(input); |
6479 | } else { |
6480 | input_ = input; |
6481 | } |
6482 | |
6483 | at::Tensor grad_output_; |
6484 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
6485 | at::functionalization::impl::sync(grad_output); |
6486 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
6487 | } else { |
6488 | grad_output_ = grad_output; |
6489 | } |
6490 | |
6491 | at::Tensor weight_; |
6492 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
6493 | at::functionalization::impl::sync(weight); |
6494 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
6495 | } else { |
6496 | weight_ = weight; |
6497 | } |
6498 | |
6499 | c10::optional<at::Tensor> running_mean_; |
6500 | if (at::functionalization::impl::isFunctionalTensor(running_mean)) { |
6501 | at::functionalization::impl::sync(running_mean); |
6502 | running_mean_ = at::functionalization::impl::from_functional_tensor(running_mean); |
6503 | } else { |
6504 | running_mean_ = running_mean; |
6505 | } |
6506 | |
6507 | c10::optional<at::Tensor> running_var_; |
6508 | if (at::functionalization::impl::isFunctionalTensor(running_var)) { |
6509 | at::functionalization::impl::sync(running_var); |
6510 | running_var_ = at::functionalization::impl::from_functional_tensor(running_var); |
6511 | } else { |
6512 | running_var_ = running_var; |
6513 | } |
6514 | |
6515 | c10::optional<at::Tensor> save_mean_; |
6516 | if (at::functionalization::impl::isFunctionalTensor(save_mean)) { |
6517 | at::functionalization::impl::sync(save_mean); |
6518 | save_mean_ = at::functionalization::impl::from_functional_tensor(save_mean); |
6519 | } else { |
6520 | save_mean_ = save_mean; |
6521 | } |
6522 | |
6523 | c10::optional<at::Tensor> save_var_; |
6524 | if (at::functionalization::impl::isFunctionalTensor(save_var)) { |
6525 | at::functionalization::impl::sync(save_var); |
6526 | save_var_ = at::functionalization::impl::from_functional_tensor(save_var); |
6527 | } else { |
6528 | save_var_ = save_var; |
6529 | } |
6530 | |
6531 | at::Tensor out0_; |
6532 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
6533 | at::functionalization::impl::sync(out0); |
6534 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
6535 | } else { |
6536 | out0_ = out0; |
6537 | } |
6538 | |
6539 | at::Tensor out1_; |
6540 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
6541 | at::functionalization::impl::sync(out1); |
6542 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
6543 | } else { |
6544 | out1_ = out1; |
6545 | } |
6546 | |
6547 | at::Tensor out2_; |
6548 | if (at::functionalization::impl::isFunctionalTensor(out2)) { |
6549 | at::functionalization::impl::sync(out2); |
6550 | out2_ = at::functionalization::impl::from_functional_tensor(out2); |
6551 | } else { |
6552 | out2_ = out2; |
6553 | } |
6554 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) { |
6555 | if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(running_mean) || at::functionalization::impl::isFunctionalTensor(running_var) || at::functionalization::impl::isFunctionalTensor(save_mean) || at::functionalization::impl::isFunctionalTensor(save_var))) { |
6556 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
6557 | TORCH_INTERNAL_ASSERT(false, |
6558 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
6559 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
6560 | } else { |
6561 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
6562 | at::AutoDispatchSkipFunctionalize guard; |
6563 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::miopen_batch_norm_backward_out::call(input_, grad_output_, weight_, running_mean_, running_var_, save_mean_, save_var_, epsilon, out0_, out1_, out2_); |
6564 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
6565 | } |
6566 | } else { |
6567 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output; |
6568 | { |
6569 | at::AutoDispatchSkipFunctionalize guard; |
6570 | tmp_output = at::_ops::miopen_batch_norm_backward::call(input_, grad_output_, weight_, running_mean_, running_var_, save_mean_, save_var_, epsilon); |
6571 | } |
6572 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
6573 | at::functionalization::impl::commit_update(out0); |
6574 | at::functionalization::impl::sync(out0); |
6575 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
6576 | at::functionalization::impl::commit_update(out1); |
6577 | at::functionalization::impl::sync(out1); |
6578 | at::functionalization::impl::replace_(out2, std::get<2>(tmp_output)); |
6579 | at::functionalization::impl::commit_update(out2); |
6580 | at::functionalization::impl::sync(out2); |
6581 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
6582 | } |
6583 | } |
6584 | |
6585 | at::Tensor & miopen_convolution_transpose_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) { |
6586 | if (false) { |
6587 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
6588 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
6589 | // (We can only do this for inplace ops today, though, because they technically all support meta tensors). |
6590 | auto self_meta = to_meta(self); |
6591 | auto weight_meta = to_meta(weight); |
6592 | auto bias_meta = to_meta(bias); |
6593 | auto out_meta = to_meta(out); |
6594 | at::AutoDispatchSkipFunctionalize func_guard; |
6595 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6596 | at::_ops::miopen_convolution_transpose_out::call(self_meta, weight_meta, bias_meta, padding, output_padding, stride, dilation, groups, benchmark, deterministic, out_meta); |
6597 | } |
6598 | |
6599 | at::Tensor self_; |
6600 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
6601 | at::functionalization::impl::sync(self); |
6602 | self_ = at::functionalization::impl::from_functional_tensor(self); |
6603 | } else { |
6604 | self_ = self; |
6605 | } |
6606 | |
6607 | at::Tensor weight_; |
6608 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
6609 | at::functionalization::impl::sync(weight); |
6610 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
6611 | } else { |
6612 | weight_ = weight; |
6613 | } |
6614 | |
6615 | c10::optional<at::Tensor> bias_; |
6616 | if (at::functionalization::impl::isFunctionalTensor(bias)) { |
6617 | at::functionalization::impl::sync(bias); |
6618 | bias_ = at::functionalization::impl::from_functional_tensor(bias); |
6619 | } else { |
6620 | bias_ = bias; |
6621 | } |
6622 | |
6623 | at::Tensor out_; |
6624 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
6625 | at::functionalization::impl::sync(out); |
6626 | out_ = at::functionalization::impl::from_functional_tensor(out); |
6627 | } else { |
6628 | out_ = out; |
6629 | } |
6630 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
6631 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) { |
6632 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
6633 | TORCH_INTERNAL_ASSERT(false, |
6634 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
6635 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
6636 | } else { |
6637 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
6638 | at::AutoDispatchSkipFunctionalize guard; |
6639 | at::Tensor tmp_output = at::_ops::miopen_convolution_transpose_out::call(self_, weight_, bias_, padding, output_padding, stride, dilation, groups, benchmark, deterministic, out_); |
6640 | return out; |
6641 | } |
6642 | } else { |
6643 | at::Tensor tmp_output; |
6644 | { |
6645 | at::AutoDispatchSkipFunctionalize guard; |
6646 | tmp_output = at::_ops::miopen_convolution_transpose::call(self_, weight_, bias_, padding, output_padding, stride, dilation, groups, benchmark, deterministic); |
6647 | } |
6648 | at::functionalization::impl::replace_(out, tmp_output); |
6649 | at::functionalization::impl::commit_update(out); |
6650 | at::functionalization::impl::sync(out); |
6651 | return out; |
6652 | } |
6653 | } |
6654 | |
6655 | at::Tensor & mm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) { |
6656 | if (false) { |
6657 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
6658 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
6659 | // (We can only do this for inplace ops today, though, because they technically all support meta tensors). |
6660 | auto self_meta = to_meta(self); |
6661 | auto mat2_meta = to_meta(mat2); |
6662 | auto out_meta = to_meta(out); |
6663 | at::AutoDispatchSkipFunctionalize func_guard; |
6664 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6665 | at::_ops::mm_out::call(self_meta, mat2_meta, out_meta); |
6666 | } |
6667 | |
6668 | at::Tensor self_; |
6669 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
6670 | at::functionalization::impl::sync(self); |
6671 | self_ = at::functionalization::impl::from_functional_tensor(self); |
6672 | } else { |
6673 | self_ = self; |
6674 | } |
6675 | |
6676 | at::Tensor mat2_; |
6677 | if (at::functionalization::impl::isFunctionalTensor(mat2)) { |
6678 | at::functionalization::impl::sync(mat2); |
6679 | mat2_ = at::functionalization::impl::from_functional_tensor(mat2); |
6680 | } else { |
6681 | mat2_ = mat2; |
6682 | } |
6683 | |
6684 | at::Tensor out_; |
6685 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
6686 | at::functionalization::impl::sync(out); |
6687 | out_ = at::functionalization::impl::from_functional_tensor(out); |
6688 | } else { |
6689 | out_ = out; |
6690 | } |
6691 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
6692 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mat2))) { |
6693 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
6694 | TORCH_INTERNAL_ASSERT(false, |
6695 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
6696 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
6697 | } else { |
6698 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
6699 | at::AutoDispatchSkipFunctionalize guard; |
6700 | at::Tensor tmp_output = at::_ops::mm_out::call(self_, mat2_, out_); |
6701 |       return out; |
6702 | } |
6703 | } else { |
6704 | at::Tensor tmp_output; |
6705 | { |
6706 | at::AutoDispatchSkipFunctionalize guard; |
6707 | tmp_output = at::_ops::mm::call(self_, mat2_); |
6708 | } |
6709 | at::functionalization::impl::replace_(out, tmp_output); |
6710 | at::functionalization::impl::commit_update(out); |
6711 | at::functionalization::impl::sync(out); |
6712 | return out; |
6713 | } |
6714 | } |
6715 | |
6716 | at::Tensor & _sparse_sparse_matmul_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
6717 | if (false) { |
6718 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
6719 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
6720 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
6721 | auto self_meta = to_meta(self); |
6722 | auto other_meta = to_meta(other); |
6723 | auto out_meta = to_meta(out); |
6724 | at::AutoDispatchSkipFunctionalize func_guard; |
6725 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6726 | at::_ops::_sparse_sparse_matmul_out::call(self_meta, other_meta, out_meta); |
6727 | } |
6728 | |
6729 | at::Tensor self_; |
6730 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
6731 | at::functionalization::impl::sync(self); |
6732 | self_ = at::functionalization::impl::from_functional_tensor(self); |
6733 | } else { |
6734 | self_ = self; |
6735 | } |
6736 | |
6737 | at::Tensor other_; |
6738 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
6739 | at::functionalization::impl::sync(other); |
6740 | other_ = at::functionalization::impl::from_functional_tensor(other); |
6741 | } else { |
6742 | other_ = other; |
6743 | } |
6744 | |
6745 | at::Tensor out_; |
6746 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
6747 | at::functionalization::impl::sync(out); |
6748 | out_ = at::functionalization::impl::from_functional_tensor(out); |
6749 | } else { |
6750 | out_ = out; |
6751 | } |
6752 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
6753 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
6754 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
6755 | TORCH_INTERNAL_ASSERT(false, |
6756 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
6757 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
6758 | } else { |
6759 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
6760 | at::AutoDispatchSkipFunctionalize guard; |
6761 | at::Tensor tmp_output = at::_ops::_sparse_sparse_matmul_out::call(self_, other_, out_); |
6762 |       return out; |
6763 | } |
6764 | } else { |
6765 | at::Tensor tmp_output; |
6766 | { |
6767 | at::AutoDispatchSkipFunctionalize guard; |
6768 | tmp_output = at::_ops::_sparse_sparse_matmul::call(self_, other_); |
6769 | } |
6770 | at::functionalization::impl::replace_(out, tmp_output); |
6771 | at::functionalization::impl::commit_update(out); |
6772 | at::functionalization::impl::sync(out); |
6773 | return out; |
6774 | } |
6775 | } |
6776 | |
6777 | at::Tensor & mul_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
6778 | if (false) { |
6779 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
6780 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
6781 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
6782 | auto self_meta = to_meta(self); |
6783 | auto other_meta = to_meta(other); |
6784 | auto out_meta = to_meta(out); |
6785 | at::AutoDispatchSkipFunctionalize func_guard; |
6786 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6787 | at::_ops::mul_out::call(self_meta, other_meta, out_meta); |
6788 | } |
6789 | |
6790 | at::Tensor self_; |
6791 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
6792 | at::functionalization::impl::sync(self); |
6793 | self_ = at::functionalization::impl::from_functional_tensor(self); |
6794 | } else { |
6795 | self_ = self; |
6796 | } |
6797 | |
6798 | at::Tensor other_; |
6799 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
6800 | at::functionalization::impl::sync(other); |
6801 | other_ = at::functionalization::impl::from_functional_tensor(other); |
6802 | } else { |
6803 | other_ = other; |
6804 | } |
6805 | |
6806 | at::Tensor out_; |
6807 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
6808 | at::functionalization::impl::sync(out); |
6809 | out_ = at::functionalization::impl::from_functional_tensor(out); |
6810 | } else { |
6811 | out_ = out; |
6812 | } |
6813 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
6814 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
6815 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
6816 | TORCH_INTERNAL_ASSERT(false, |
6817 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
6818 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
6819 | } else { |
6820 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
6821 | at::AutoDispatchSkipFunctionalize guard; |
6822 | at::Tensor tmp_output = at::_ops::mul_out::call(self_, other_, out_); |
6823 |       return out; |
6824 | } |
6825 | } else { |
6826 | at::Tensor tmp_output; |
6827 | { |
6828 | at::AutoDispatchSkipFunctionalize guard; |
6829 | tmp_output = at::_ops::mul_Tensor::call(self_, other_); |
6830 | } |
6831 | at::functionalization::impl::replace_(out, tmp_output); |
6832 | at::functionalization::impl::commit_update(out); |
6833 | at::functionalization::impl::sync(out); |
6834 | return out; |
6835 | } |
6836 | } |
6837 | |
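// In-place kernels such as mul__Tensor below reuse the same unwrap/redispatch scheme,
// with two differences: the meta-tensor shape pre-check is compiled in (`if (true)`)
// because in-place ops are expected to support meta tensors, and the functional result
// is committed back into `self` instead of a separate `out` argument.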
6838 | at::Tensor & mul__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { |
6839 | if (true) { |
6840 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
6841 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
6842 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
6843 | auto self_meta = to_meta(self); |
6844 | auto other_meta = to_meta(other); |
6845 | at::AutoDispatchSkipFunctionalize func_guard; |
6846 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6847 | at::_ops::mul__Tensor::call(self_meta, other_meta); |
6848 | } |
6849 | |
6850 | at::Tensor self_; |
6851 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
6852 | at::functionalization::impl::sync(self); |
6853 | self_ = at::functionalization::impl::from_functional_tensor(self); |
6854 | } else { |
6855 | self_ = self; |
6856 | } |
6857 | |
6858 | at::Tensor other_; |
6859 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
6860 | at::functionalization::impl::sync(other); |
6861 | other_ = at::functionalization::impl::from_functional_tensor(other); |
6862 | } else { |
6863 | other_ = other; |
6864 | } |
6865 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
6866 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
6867 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
6868 | TORCH_INTERNAL_ASSERT(false, |
6869 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
6870 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
6871 | } else { |
6872 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
6873 | at::AutoDispatchSkipFunctionalize guard; |
6874 | at::Tensor tmp_output = at::_ops::mul__Tensor::call(self_, other_); |
6875 |       return self; |
6876 | } |
6877 | } else { |
6878 | at::Tensor tmp_output; |
6879 | { |
6880 | at::AutoDispatchSkipFunctionalize guard; |
6881 | tmp_output = at::_ops::mul_Tensor::call(self_, other_); |
6882 | } |
6883 | at::functionalization::impl::replace_(self, tmp_output); |
6884 | at::functionalization::impl::commit_update(self); |
6885 | at::functionalization::impl::sync(self); |
6886 | return self; |
6887 | } |
6888 | } |
6889 | |
6890 | at::Tensor & mul_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
6891 | if (false) { |
6892 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
6893 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
6894 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
6895 | auto self_meta = to_meta(self); |
6896 | auto out_meta = to_meta(out); |
6897 | at::AutoDispatchSkipFunctionalize func_guard; |
6898 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6899 | at::_ops::mul_Scalar_out::call(self_meta, other, out_meta); |
6900 | } |
6901 | |
6902 | at::Tensor self_; |
6903 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
6904 | at::functionalization::impl::sync(self); |
6905 | self_ = at::functionalization::impl::from_functional_tensor(self); |
6906 | } else { |
6907 | self_ = self; |
6908 | } |
6909 | |
6910 | at::Tensor out_; |
6911 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
6912 | at::functionalization::impl::sync(out); |
6913 | out_ = at::functionalization::impl::from_functional_tensor(out); |
6914 | } else { |
6915 | out_ = out; |
6916 | } |
6917 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
6918 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
6919 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
6920 | TORCH_INTERNAL_ASSERT(false, |
6921 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
6922 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
6923 | } else { |
6924 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
6925 | at::AutoDispatchSkipFunctionalize guard; |
6926 | at::Tensor tmp_output = at::_ops::mul_Scalar_out::call(self_, other, out_); |
6927 |       return out; |
6928 | } |
6929 | } else { |
6930 | at::Tensor tmp_output; |
6931 | { |
6932 | at::AutoDispatchSkipFunctionalize guard; |
6933 | tmp_output = at::_ops::mul_Scalar::call(self_, other); |
6934 | } |
6935 | at::functionalization::impl::replace_(out, tmp_output); |
6936 | at::functionalization::impl::commit_update(out); |
6937 | at::functionalization::impl::sync(out); |
6938 | return out; |
6939 | } |
6940 | } |
6941 | |
6942 | at::Tensor & mul__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { |
6943 | if (true) { |
6944 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
6945 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
6946 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
6947 | auto self_meta = to_meta(self); |
6948 | at::AutoDispatchSkipFunctionalize func_guard; |
6949 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6950 | at::_ops::mul__Scalar::call(self_meta, other); |
6951 | } |
6952 | |
6953 | at::Tensor self_; |
6954 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
6955 | at::functionalization::impl::sync(self); |
6956 | self_ = at::functionalization::impl::from_functional_tensor(self); |
6957 | } else { |
6958 | self_ = self; |
6959 | } |
6960 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
6961 | if ((false)) { |
6962 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
6963 | TORCH_INTERNAL_ASSERT(false, |
6964 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
6965 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
6966 | } else { |
6967 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
6968 | at::AutoDispatchSkipFunctionalize guard; |
6969 | at::Tensor tmp_output = at::_ops::mul__Scalar::call(self_, other); |
6970 |       return self; |
6971 | } |
6972 | } else { |
6973 | at::Tensor tmp_output; |
6974 | { |
6975 | at::AutoDispatchSkipFunctionalize guard; |
6976 | tmp_output = at::_ops::mul_Scalar::call(self_, other); |
6977 | } |
6978 | at::functionalization::impl::replace_(self, tmp_output); |
6979 | at::functionalization::impl::commit_update(self); |
6980 | at::functionalization::impl::sync(self); |
6981 | return self; |
6982 | } |
6983 | } |
6984 | |
6985 | at::Tensor & mvlgamma_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t p, at::Tensor & out) { |
6986 | if (false) { |
6987 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
6988 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
6989 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
6990 | auto self_meta = to_meta(self); |
6991 | auto out_meta = to_meta(out); |
6992 | at::AutoDispatchSkipFunctionalize func_guard; |
6993 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6994 | at::_ops::mvlgamma_out::call(self_meta, p, out_meta); |
6995 | } |
6996 | |
6997 | at::Tensor self_; |
6998 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
6999 | at::functionalization::impl::sync(self); |
7000 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7001 | } else { |
7002 | self_ = self; |
7003 | } |
7004 | |
7005 | at::Tensor out_; |
7006 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7007 | at::functionalization::impl::sync(out); |
7008 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7009 | } else { |
7010 | out_ = out; |
7011 | } |
7012 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7013 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
7014 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
7015 | TORCH_INTERNAL_ASSERT(false, |
7016 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7017 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7018 | } else { |
7019 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7020 | at::AutoDispatchSkipFunctionalize guard; |
7021 | at::Tensor tmp_output = at::_ops::mvlgamma_out::call(self_, p, out_); |
7022 |       return out; |
7023 | } |
7024 | } else { |
7025 | at::Tensor tmp_output; |
7026 | { |
7027 | at::AutoDispatchSkipFunctionalize guard; |
7028 | tmp_output = at::_ops::mvlgamma::call(self_, p); |
7029 | } |
7030 | at::functionalization::impl::replace_(out, tmp_output); |
7031 | at::functionalization::impl::commit_update(out); |
7032 | at::functionalization::impl::sync(out); |
7033 | return out; |
7034 | } |
7035 | } |
7036 | |
7037 | at::Tensor & mvlgamma_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t p) { |
7038 | if (true) { |
7039 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7040 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7041 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7042 | auto self_meta = to_meta(self); |
7043 | at::AutoDispatchSkipFunctionalize func_guard; |
7044 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7045 | at::_ops::mvlgamma_::call(self_meta, p); |
7046 | } |
7047 | |
7048 | at::Tensor self_; |
7049 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7050 | at::functionalization::impl::sync(self); |
7051 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7052 | } else { |
7053 | self_ = self; |
7054 | } |
7055 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
7056 | if ((false)) { |
7057 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
7058 | TORCH_INTERNAL_ASSERT(false, |
7059 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7060 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7061 | } else { |
7062 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7063 | at::AutoDispatchSkipFunctionalize guard; |
7064 | at::Tensor tmp_output = at::_ops::mvlgamma_::call(self_, p); |
7065 |       return self; |
7066 | } |
7067 | } else { |
7068 | at::Tensor tmp_output; |
7069 | { |
7070 | at::AutoDispatchSkipFunctionalize guard; |
7071 | tmp_output = at::_ops::mvlgamma::call(self_, p); |
7072 | } |
7073 | at::functionalization::impl::replace_(self, tmp_output); |
7074 | at::functionalization::impl::commit_update(self); |
7075 | at::functionalization::impl::sync(self); |
7076 | return self; |
7077 | } |
7078 | } |
7079 | |
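// Ops with multiple out= arguments, like batch_norm_backward_reduce below, get a tuple
// back from the functional variant; each element is written into its corresponding
// output tensor with replace_() / commit_update() / sync(), in positional order.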
7080 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> batch_norm_backward_reduce_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, bool input_g, bool weight_g, bool bias_g, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) { |
7081 | if (false) { |
7082 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7083 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7084 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7085 | auto grad_out_meta = to_meta(grad_out); |
7086 | auto input_meta = to_meta(input); |
7087 | auto mean_meta = to_meta(mean); |
7088 | auto invstd_meta = to_meta(invstd); |
7089 | auto weight_meta = to_meta(weight); |
7090 | auto out0_meta = to_meta(out0); |
7091 | auto out1_meta = to_meta(out1); |
7092 | auto out2_meta = to_meta(out2); |
7093 | auto out3_meta = to_meta(out3); |
7094 | at::AutoDispatchSkipFunctionalize func_guard; |
7095 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7096 | at::_ops::batch_norm_backward_reduce_out::call(grad_out_meta, input_meta, mean_meta, invstd_meta, weight_meta, input_g, weight_g, bias_g, out0_meta, out1_meta, out2_meta, out3_meta); |
7097 | } |
7098 | |
7099 | at::Tensor grad_out_; |
7100 | if (at::functionalization::impl::isFunctionalTensor(grad_out)) { |
7101 | at::functionalization::impl::sync(grad_out); |
7102 | grad_out_ = at::functionalization::impl::from_functional_tensor(grad_out); |
7103 | } else { |
7104 | grad_out_ = grad_out; |
7105 | } |
7106 | |
7107 | at::Tensor input_; |
7108 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
7109 | at::functionalization::impl::sync(input); |
7110 | input_ = at::functionalization::impl::from_functional_tensor(input); |
7111 | } else { |
7112 | input_ = input; |
7113 | } |
7114 | |
7115 | at::Tensor mean_; |
7116 | if (at::functionalization::impl::isFunctionalTensor(mean)) { |
7117 | at::functionalization::impl::sync(mean); |
7118 | mean_ = at::functionalization::impl::from_functional_tensor(mean); |
7119 | } else { |
7120 | mean_ = mean; |
7121 | } |
7122 | |
7123 | at::Tensor invstd_; |
7124 | if (at::functionalization::impl::isFunctionalTensor(invstd)) { |
7125 | at::functionalization::impl::sync(invstd); |
7126 | invstd_ = at::functionalization::impl::from_functional_tensor(invstd); |
7127 | } else { |
7128 | invstd_ = invstd; |
7129 | } |
7130 | |
7131 | c10::optional<at::Tensor> weight_; |
7132 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
7133 | at::functionalization::impl::sync(weight); |
7134 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
7135 | } else { |
7136 | weight_ = weight; |
7137 | } |
7138 | |
7139 | at::Tensor out0_; |
7140 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
7141 | at::functionalization::impl::sync(out0); |
7142 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
7143 | } else { |
7144 | out0_ = out0; |
7145 | } |
7146 | |
7147 | at::Tensor out1_; |
7148 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
7149 | at::functionalization::impl::sync(out1); |
7150 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
7151 | } else { |
7152 | out1_ = out1; |
7153 | } |
7154 | |
7155 | at::Tensor out2_; |
7156 | if (at::functionalization::impl::isFunctionalTensor(out2)) { |
7157 | at::functionalization::impl::sync(out2); |
7158 | out2_ = at::functionalization::impl::from_functional_tensor(out2); |
7159 | } else { |
7160 | out2_ = out2; |
7161 | } |
7162 | |
7163 | at::Tensor out3_; |
7164 | if (at::functionalization::impl::isFunctionalTensor(out3)) { |
7165 | at::functionalization::impl::sync(out3); |
7166 | out3_ = at::functionalization::impl::from_functional_tensor(out3); |
7167 | } else { |
7168 | out3_ = out3; |
7169 | } |
7170 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2) && at::functionalization::impl::isFunctionalTensor(out3))) { |
7171 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_out) || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(mean) || at::functionalization::impl::isFunctionalTensor(invstd) || at::functionalization::impl::isFunctionalTensor(weight))) { |
7172 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
7173 | TORCH_INTERNAL_ASSERT(false, |
7174 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7175 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7176 | } else { |
7177 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7178 | at::AutoDispatchSkipFunctionalize guard; |
7179 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::batch_norm_backward_reduce_out::call(grad_out_, input_, mean_, invstd_, weight_, input_g, weight_g, bias_g, out0_, out1_, out2_, out3_); |
7180 |       return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3); |
7181 | } |
7182 | } else { |
7183 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output; |
7184 | { |
7185 | at::AutoDispatchSkipFunctionalize guard; |
7186 | tmp_output = at::_ops::batch_norm_backward_reduce::call(grad_out_, input_, mean_, invstd_, weight_, input_g, weight_g, bias_g); |
7187 | } |
7188 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
7189 | at::functionalization::impl::commit_update(out0); |
7190 | at::functionalization::impl::sync(out0); |
7191 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
7192 | at::functionalization::impl::commit_update(out1); |
7193 | at::functionalization::impl::sync(out1); |
7194 | at::functionalization::impl::replace_(out2, std::get<2>(tmp_output)); |
7195 | at::functionalization::impl::commit_update(out2); |
7196 | at::functionalization::impl::sync(out2); |
7197 | at::functionalization::impl::replace_(out3, std::get<3>(tmp_output)); |
7198 | at::functionalization::impl::commit_update(out3); |
7199 | at::functionalization::impl::sync(out3); |
7200 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3); |
7201 | } |
7202 | } |
7203 | |
7204 | at::Tensor & deg2rad_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
7205 | if (false) { |
7206 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7207 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7208 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7209 | auto self_meta = to_meta(self); |
7210 | auto out_meta = to_meta(out); |
7211 | at::AutoDispatchSkipFunctionalize func_guard; |
7212 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7213 | at::_ops::deg2rad_out::call(self_meta, out_meta); |
7214 | } |
7215 | |
7216 | at::Tensor self_; |
7217 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7218 | at::functionalization::impl::sync(self); |
7219 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7220 | } else { |
7221 | self_ = self; |
7222 | } |
7223 | |
7224 | at::Tensor out_; |
7225 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7226 | at::functionalization::impl::sync(out); |
7227 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7228 | } else { |
7229 | out_ = out; |
7230 | } |
7231 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7232 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
7233 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
7234 | TORCH_INTERNAL_ASSERT(false, |
7235 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7236 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7237 | } else { |
7238 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7239 | at::AutoDispatchSkipFunctionalize guard; |
7240 | at::Tensor tmp_output = at::_ops::deg2rad_out::call(self_, out_); |
7241 |       return out; |
7242 | } |
7243 | } else { |
7244 | at::Tensor tmp_output; |
7245 | { |
7246 | at::AutoDispatchSkipFunctionalize guard; |
7247 | tmp_output = at::_ops::deg2rad::call(self_); |
7248 | } |
7249 | at::functionalization::impl::replace_(out, tmp_output); |
7250 | at::functionalization::impl::commit_update(out); |
7251 | at::functionalization::impl::sync(out); |
7252 | return out; |
7253 | } |
7254 | } |
7255 | |
7256 | at::Tensor & deg2rad_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
7257 | if (true) { |
7258 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7259 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7260 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7261 | auto self_meta = to_meta(self); |
7262 | at::AutoDispatchSkipFunctionalize func_guard; |
7263 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7264 | at::_ops::deg2rad_::call(self_meta); |
7265 | } |
7266 | |
7267 | at::Tensor self_; |
7268 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7269 | at::functionalization::impl::sync(self); |
7270 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7271 | } else { |
7272 | self_ = self; |
7273 | } |
7274 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
7275 | if ((false)) { |
7276 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
7277 | TORCH_INTERNAL_ASSERT(false, |
7278 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7279 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7280 | } else { |
7281 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7282 | at::AutoDispatchSkipFunctionalize guard; |
7283 | at::Tensor tmp_output = at::_ops::deg2rad_::call(self_); |
7284 |       return self; |
7285 | } |
7286 | } else { |
7287 | at::Tensor tmp_output; |
7288 | { |
7289 | at::AutoDispatchSkipFunctionalize guard; |
7290 | tmp_output = at::_ops::deg2rad::call(self_); |
7291 | } |
7292 | at::functionalization::impl::replace_(self, tmp_output); |
7293 | at::functionalization::impl::commit_update(self); |
7294 | at::functionalization::impl::sync(self); |
7295 | return self; |
7296 | } |
7297 | } |
7298 | |
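// Factory-style out= ops such as randint_like take TensorOptions in their functional
// form rather than an `out` tensor, so the replacement value is built with the dtype,
// layout and device read off the unwrapped `out_` (out_.scalar_type(), out_.layout(),
// out_.device()), leaving pin_memory as c10::nullopt.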
7299 | at::Tensor & randint_like_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t high, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { |
7300 | if (false) { |
7301 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7302 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7303 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7304 | auto self_meta = to_meta(self); |
7305 | auto out_meta = to_meta(out); |
7306 | at::AutoDispatchSkipFunctionalize func_guard; |
7307 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7308 | at::_ops::randint_like_out::call(self_meta, high, memory_format, out_meta); |
7309 | } |
7310 | |
7311 | at::Tensor self_; |
7312 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7313 | at::functionalization::impl::sync(self); |
7314 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7315 | } else { |
7316 | self_ = self; |
7317 | } |
7318 | |
7319 | at::Tensor out_; |
7320 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7321 | at::functionalization::impl::sync(out); |
7322 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7323 | } else { |
7324 | out_ = out; |
7325 | } |
7326 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7327 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
7328 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
7329 | TORCH_INTERNAL_ASSERT(false, |
7330 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7331 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7332 | } else { |
7333 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7334 | at::AutoDispatchSkipFunctionalize guard; |
7335 | at::Tensor tmp_output = at::_ops::randint_like_out::call(self_, high, memory_format, out_); |
7336 |       return out; |
7337 | } |
7338 | } else { |
7339 | at::Tensor tmp_output; |
7340 | { |
7341 | at::AutoDispatchSkipFunctionalize guard; |
7342 | tmp_output = at::_ops::randint_like::call(self_, high, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt, memory_format); |
7343 | } |
7344 | at::functionalization::impl::replace_(out, tmp_output); |
7345 | at::functionalization::impl::commit_update(out); |
7346 | at::functionalization::impl::sync(out); |
7347 | return out; |
7348 | } |
7349 | } |
7350 | |
7351 | at::Tensor & randint_like_out_low_dtype_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t low, int64_t high, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { |
7352 | if (false) { |
7353 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7354 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7355 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7356 | auto self_meta = to_meta(self); |
7357 | auto out_meta = to_meta(out); |
7358 | at::AutoDispatchSkipFunctionalize func_guard; |
7359 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7360 | at::_ops::randint_like_low_dtype_out::call(self_meta, low, high, memory_format, out_meta); |
7361 | } |
7362 | |
7363 | at::Tensor self_; |
7364 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7365 | at::functionalization::impl::sync(self); |
7366 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7367 | } else { |
7368 | self_ = self; |
7369 | } |
7370 | |
7371 | at::Tensor out_; |
7372 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7373 | at::functionalization::impl::sync(out); |
7374 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7375 | } else { |
7376 | out_ = out; |
7377 | } |
7378 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7379 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
7380 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
7381 | TORCH_INTERNAL_ASSERT(false, |
7382 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7383 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7384 | } else { |
7385 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7386 | at::AutoDispatchSkipFunctionalize guard; |
7387 | at::Tensor tmp_output = at::_ops::randint_like_low_dtype_out::call(self_, low, high, memory_format, out_); |
7388 |       return out; |
7389 | } |
7390 | } else { |
7391 | at::Tensor tmp_output; |
7392 | { |
7393 | at::AutoDispatchSkipFunctionalize guard; |
7394 | tmp_output = at::_ops::randint_like_low_dtype::call(self_, low, high, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt, memory_format); |
7395 | } |
7396 | at::functionalization::impl::replace_(out, tmp_output); |
7397 | at::functionalization::impl::commit_update(out); |
7398 | at::functionalization::impl::sync(out); |
7399 | return out; |
7400 | } |
7401 | } |
7402 | |
7403 | at::Tensor & repeat_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef repeats, at::Tensor & out) { |
7404 | if (false) { |
7405 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7406 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7407 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7408 | auto self_meta = to_meta(self); |
7409 | auto out_meta = to_meta(out); |
7410 | at::AutoDispatchSkipFunctionalize func_guard; |
7411 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7412 | at::_ops::repeat_out::call(self_meta, repeats, out_meta); |
7413 | } |
7414 | |
7415 | at::Tensor self_; |
7416 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7417 | at::functionalization::impl::sync(self); |
7418 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7419 | } else { |
7420 | self_ = self; |
7421 | } |
7422 | |
7423 | at::Tensor out_; |
7424 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7425 | at::functionalization::impl::sync(out); |
7426 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7427 | } else { |
7428 | out_ = out; |
7429 | } |
7430 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7431 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
7432 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
7433 | TORCH_INTERNAL_ASSERT(false, |
7434 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7435 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7436 | } else { |
7437 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7438 | at::AutoDispatchSkipFunctionalize guard; |
7439 | at::Tensor tmp_output = at::_ops::repeat_out::call(self_, repeats, out_); |
7440 |       return out; |
7441 | } |
7442 | } else { |
7443 | at::Tensor tmp_output; |
7444 | { |
7445 | at::AutoDispatchSkipFunctionalize guard; |
7446 | tmp_output = at::_ops::repeat::call(self_, repeats); |
7447 | } |
7448 | at::functionalization::impl::replace_(out, tmp_output); |
7449 | at::functionalization::impl::commit_update(out); |
7450 | at::functionalization::impl::sync(out); |
7451 | return out; |
7452 | } |
7453 | } |
7454 | |
7455 | at::Tensor & _mkldnn_reshape_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef shape, at::Tensor & out) { |
7456 | if (false) { |
7457 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7458 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7459 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7460 | auto self_meta = to_meta(self); |
7461 | auto out_meta = to_meta(out); |
7462 | at::AutoDispatchSkipFunctionalize func_guard; |
7463 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7464 | at::_ops::_mkldnn_reshape_out::call(self_meta, shape, out_meta); |
7465 | } |
7466 | |
7467 | at::Tensor self_; |
7468 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7469 | at::functionalization::impl::sync(self); |
7470 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7471 | } else { |
7472 | self_ = self; |
7473 | } |
7474 | |
7475 | at::Tensor out_; |
7476 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7477 | at::functionalization::impl::sync(out); |
7478 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7479 | } else { |
7480 | out_ = out; |
7481 | } |
7482 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7483 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
7484 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
7485 | TORCH_INTERNAL_ASSERT(false, |
7486 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7487 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7488 | } else { |
7489 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7490 | at::AutoDispatchSkipFunctionalize guard; |
7491 | at::Tensor tmp_output = at::_ops::_mkldnn_reshape_out::call(self_, shape, out_); |
7492 |       return out; |
7493 | } |
7494 | } else { |
7495 | at::Tensor tmp_output; |
7496 | { |
7497 | at::AutoDispatchSkipFunctionalize guard; |
7498 | tmp_output = at::_ops::_mkldnn_reshape::call(self_, shape); |
7499 | } |
7500 | at::functionalization::impl::replace_(out, tmp_output); |
7501 | at::functionalization::impl::commit_update(out); |
7502 | at::functionalization::impl::sync(out); |
7503 | return out; |
7504 | } |
7505 | } |
7506 | |
7507 | at::Tensor & round_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
7508 | if (false) { |
7509 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7510 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7511 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7512 | auto self_meta = to_meta(self); |
7513 | auto out_meta = to_meta(out); |
7514 | at::AutoDispatchSkipFunctionalize func_guard; |
7515 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7516 | at::_ops::round_out::call(self_meta, out_meta); |
7517 | } |
7518 | |
7519 | at::Tensor self_; |
7520 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7521 | at::functionalization::impl::sync(self); |
7522 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7523 | } else { |
7524 | self_ = self; |
7525 | } |
7526 | |
7527 | at::Tensor out_; |
7528 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7529 | at::functionalization::impl::sync(out); |
7530 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7531 | } else { |
7532 | out_ = out; |
7533 | } |
7534 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7535 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
7536 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
7537 | TORCH_INTERNAL_ASSERT(false, |
7538 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7539 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7540 | } else { |
7541 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7542 | at::AutoDispatchSkipFunctionalize guard; |
7543 | at::Tensor tmp_output = at::_ops::round_out::call(self_, out_); |
7544 |       return out; |
7545 | } |
7546 | } else { |
7547 | at::Tensor tmp_output; |
7548 | { |
7549 | at::AutoDispatchSkipFunctionalize guard; |
7550 | tmp_output = at::_ops::round::call(self_); |
7551 | } |
7552 | at::functionalization::impl::replace_(out, tmp_output); |
7553 | at::functionalization::impl::commit_update(out); |
7554 | at::functionalization::impl::sync(out); |
7555 | return out; |
7556 | } |
7557 | } |
7558 | |
7559 | at::Tensor & round_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
7560 | if (true) { |
7561 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7562 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7563 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7564 | auto self_meta = to_meta(self); |
7565 | at::AutoDispatchSkipFunctionalize func_guard; |
7566 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7567 | at::_ops::round_::call(self_meta); |
7568 | } |
7569 | |
7570 | at::Tensor self_; |
7571 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7572 | at::functionalization::impl::sync(self); |
7573 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7574 | } else { |
7575 | self_ = self; |
7576 | } |
7577 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
7578 | if ((false)) { |
7579 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
7580 | TORCH_INTERNAL_ASSERT(false, |
7581 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7582 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7583 | } else { |
7584 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7585 | at::AutoDispatchSkipFunctionalize guard; |
7586 | at::Tensor tmp_output = at::_ops::round_::call(self_); |
7587 |       return self; |
7588 | } |
7589 | } else { |
7590 | at::Tensor tmp_output; |
7591 | { |
7592 | at::AutoDispatchSkipFunctionalize guard; |
7593 | tmp_output = at::_ops::round::call(self_); |
7594 | } |
7595 | at::functionalization::impl::replace_(self, tmp_output); |
7596 | at::functionalization::impl::commit_update(self); |
7597 | at::functionalization::impl::sync(self); |
7598 | return self; |
7599 | } |
7600 | } |
7601 | |
7602 | at::Tensor & round_out_decimals_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t decimals, at::Tensor & out) { |
7603 | if (false) { |
7604 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7605 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7606 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7607 | auto self_meta = to_meta(self); |
7608 | auto out_meta = to_meta(out); |
7609 | at::AutoDispatchSkipFunctionalize func_guard; |
7610 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7611 | at::_ops::round_decimals_out::call(self_meta, decimals, out_meta); |
7612 | } |
7613 | |
7614 | at::Tensor self_; |
7615 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7616 | at::functionalization::impl::sync(self); |
7617 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7618 | } else { |
7619 | self_ = self; |
7620 | } |
7621 | |
7622 | at::Tensor out_; |
7623 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7624 | at::functionalization::impl::sync(out); |
7625 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7626 | } else { |
7627 | out_ = out; |
7628 | } |
7629 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7630 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
7631 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
7632 | TORCH_INTERNAL_ASSERT(false, |
7633 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7634 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7635 | } else { |
7636 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7637 | at::AutoDispatchSkipFunctionalize guard; |
7638 | at::Tensor tmp_output = at::_ops::round_decimals_out::call(self_, decimals, out_); |
7639 |       return out; |
7640 | } |
7641 | } else { |
7642 | at::Tensor tmp_output; |
7643 | { |
7644 | at::AutoDispatchSkipFunctionalize guard; |
7645 | tmp_output = at::_ops::round_decimals::call(self_, decimals); |
7646 | } |
7647 | at::functionalization::impl::replace_(out, tmp_output); |
7648 | at::functionalization::impl::commit_update(out); |
7649 | at::functionalization::impl::sync(out); |
7650 | return out; |
7651 | } |
7652 | } |
7653 | |
7654 | at::Tensor & round__decimals(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t decimals) { |
7655 | if (true) { |
7656 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7657 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7658 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7659 | auto self_meta = to_meta(self); |
7660 | at::AutoDispatchSkipFunctionalize func_guard; |
7661 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7662 | at::_ops::round__decimals::call(self_meta, decimals); |
7663 | } |
7664 | |
7665 | at::Tensor self_; |
7666 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7667 | at::functionalization::impl::sync(self); |
7668 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7669 | } else { |
7670 | self_ = self; |
7671 | } |
7672 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
7673 | if ((false)) { |
7674 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
7675 | TORCH_INTERNAL_ASSERT(false, |
7676 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7677 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7678 | } else { |
7679 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7680 | at::AutoDispatchSkipFunctionalize guard; |
7681 | at::Tensor tmp_output = at::_ops::round__decimals::call(self_, decimals); |
7682 |       return self; |
7683 | } |
7684 | } else { |
7685 | at::Tensor tmp_output; |
7686 | { |
7687 | at::AutoDispatchSkipFunctionalize guard; |
7688 | tmp_output = at::_ops::round_decimals::call(self_, decimals); |
7689 | } |
7690 | at::functionalization::impl::replace_(self, tmp_output); |
7691 | at::functionalization::impl::commit_update(self); |
7692 | at::functionalization::impl::sync(self); |
7693 | return self; |
7694 | } |
7695 | } |
7696 | |
7697 | at::Tensor & rsqrt_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
7698 | if (false) { |
7699 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7700 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7701 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7702 | auto self_meta = to_meta(self); |
7703 | auto out_meta = to_meta(out); |
7704 | at::AutoDispatchSkipFunctionalize func_guard; |
7705 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7706 | at::_ops::rsqrt_out::call(self_meta, out_meta); |
7707 | } |
7708 | |
7709 | at::Tensor self_; |
7710 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7711 | at::functionalization::impl::sync(self); |
7712 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7713 | } else { |
7714 | self_ = self; |
7715 | } |
7716 | |
7717 | at::Tensor out_; |
7718 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7719 | at::functionalization::impl::sync(out); |
7720 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7721 | } else { |
7722 | out_ = out; |
7723 | } |
7724 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7725 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
7726 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
7727 | TORCH_INTERNAL_ASSERT(false, |
7728 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7729 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7730 | } else { |
7731 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7732 | at::AutoDispatchSkipFunctionalize guard; |
7733 | at::Tensor tmp_output = at::_ops::rsqrt_out::call(self_, out_); |
7734 |       return out; |
7735 | } |
7736 | } else { |
7737 | at::Tensor tmp_output; |
7738 | { |
7739 | at::AutoDispatchSkipFunctionalize guard; |
7740 | tmp_output = at::_ops::rsqrt::call(self_); |
7741 | } |
7742 | at::functionalization::impl::replace_(out, tmp_output); |
7743 | at::functionalization::impl::commit_update(out); |
7744 | at::functionalization::impl::sync(out); |
7745 | return out; |
7746 | } |
7747 | } |
7748 | |
7749 | at::Tensor & rsqrt_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
7750 | if (true) { |
7751 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7752 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7753 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7754 | auto self_meta = to_meta(self); |
7755 | at::AutoDispatchSkipFunctionalize func_guard; |
7756 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7757 | at::_ops::rsqrt_::call(self_meta); |
7758 | } |
7759 | |
7760 | at::Tensor self_; |
7761 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7762 | at::functionalization::impl::sync(self); |
7763 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7764 | } else { |
7765 | self_ = self; |
7766 | } |
7767 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
7768 | if ((false)) { |
7769 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
7770 | TORCH_INTERNAL_ASSERT(false, |
7771 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7772 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7773 | } else { |
7774 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7775 | at::AutoDispatchSkipFunctionalize guard; |
7776 | at::Tensor tmp_output = at::_ops::rsqrt_::call(self_); |
7777 |       return self; |
7778 | } |
7779 | } else { |
7780 | at::Tensor tmp_output; |
7781 | { |
7782 | at::AutoDispatchSkipFunctionalize guard; |
7783 | tmp_output = at::_ops::rsqrt::call(self_); |
7784 | } |
7785 | at::functionalization::impl::replace_(self, tmp_output); |
7786 | at::functionalization::impl::commit_update(self); |
7787 | at::functionalization::impl::sync(self); |
7788 | return self; |
7789 | } |
7790 | } |
7791 | |
7792 | at::Tensor & celu_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & alpha, at::Tensor & out) { |
7793 | if (false) { |
7794 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7795 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7796 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7797 | auto self_meta = to_meta(self); |
7798 | auto out_meta = to_meta(out); |
7799 | at::AutoDispatchSkipFunctionalize func_guard; |
7800 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7801 | at::_ops::celu_out::call(self_meta, alpha, out_meta); |
7802 | } |
7803 | |
7804 | at::Tensor self_; |
7805 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7806 | at::functionalization::impl::sync(self); |
7807 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7808 | } else { |
7809 | self_ = self; |
7810 | } |
7811 | |
7812 | at::Tensor out_; |
7813 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7814 | at::functionalization::impl::sync(out); |
7815 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7816 | } else { |
7817 | out_ = out; |
7818 | } |
7819 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7820 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
7821 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
7822 | TORCH_INTERNAL_ASSERT(false, |
7823 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7824 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7825 | } else { |
7826 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7827 | at::AutoDispatchSkipFunctionalize guard; |
7828 | at::Tensor tmp_output = at::_ops::celu_out::call(self_, alpha, out_); |
7829 |       return out; |
7830 | } |
7831 | } else { |
7832 | at::Tensor tmp_output; |
7833 | { |
7834 | at::AutoDispatchSkipFunctionalize guard; |
7835 | tmp_output = at::_ops::celu::call(self_, alpha); |
7836 | } |
7837 | at::functionalization::impl::replace_(out, tmp_output); |
7838 | at::functionalization::impl::commit_update(out); |
7839 | at::functionalization::impl::sync(out); |
7840 | return out; |
7841 | } |
7842 | } |
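    // The out= wrappers in this file all follow the pattern of celu_out_out above: if `out` is a
    // functional tensor, the functional variant (here at::celu) is computed and committed back into
    // `out`'s wrapper; if no argument is functional, the original out= op is simply redispatched; and
    // mutating a non-functional `out` with functional inputs is rejected. A minimal usage sketch,
    // assuming the public ATen API and an active Functionalize dispatch key (illustrative only, not
    // part of the generated registrations):
    //
    //   at::Tensor self = at::randn({4});
    //   at::Tensor out  = at::functionalization::impl::to_functional_tensor(at::empty({4}));
    //   at::celu_out(out, self, /*alpha=*/1.0);  // routed to celu_out_out above, which runs
    //                                            // at::celu(self, 1.0) and updates `out`'s wrapper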
7843 | |
7844 | at::Tensor & celu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & alpha) { |
7845 | if (true) { |
7846 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7847 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7848 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7849 | auto self_meta = to_meta(self); |
7850 | at::AutoDispatchSkipFunctionalize func_guard; |
7851 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7852 | at::_ops::celu_::call(self_meta, alpha); |
7853 | } |
7854 | |
7855 | at::Tensor self_; |
7856 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7857 | at::functionalization::impl::sync(self); |
7858 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7859 | } else { |
7860 | self_ = self; |
7861 | } |
7862 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
7863 | if ((false)) { |
7864 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
7865 | TORCH_INTERNAL_ASSERT(false, |
7866 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7867 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7868 | } else { |
7869 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7870 | at::AutoDispatchSkipFunctionalize guard; |
7871 | at::Tensor tmp_output = at::_ops::celu_::call(self_, alpha); |
7872 |           return self; |
7873 | } |
7874 | } else { |
7875 | at::Tensor tmp_output; |
7876 | { |
7877 | at::AutoDispatchSkipFunctionalize guard; |
7878 | tmp_output = at::_ops::celu::call(self_, alpha); |
7879 | } |
7880 | at::functionalization::impl::replace_(self, tmp_output); |
7881 | at::functionalization::impl::commit_update(self); |
7882 | at::functionalization::impl::sync(self); |
7883 | return self; |
7884 | } |
7885 | } |
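    // In-place wrappers such as celu_ above first (when enabled) replay the op on meta tensors to
    // surface shape errors early, then swap the mutation for the functional variant and commit the
    // result back into `self`'s wrapper. A minimal sketch, assuming the usual Tensor method API
    // (illustrative only):
    //
    //   at::Tensor t = at::functionalization::impl::to_functional_tensor(at::randn({4}));
    //   t.celu_(1.0);  // dispatches to celu_ above; the in-place update is replaced by
    //                  // at::celu(t, 1.0) plus an update of t's functional wrapper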
7886 | |
7887 | at::Tensor & sigmoid_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
7888 | if (false) { |
7889 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7890 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7891 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7892 | auto self_meta = to_meta(self); |
7893 | auto out_meta = to_meta(out); |
7894 | at::AutoDispatchSkipFunctionalize func_guard; |
7895 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7896 | at::_ops::sigmoid_out::call(self_meta, out_meta); |
7897 | } |
7898 | |
7899 | at::Tensor self_; |
7900 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7901 | at::functionalization::impl::sync(self); |
7902 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7903 | } else { |
7904 | self_ = self; |
7905 | } |
7906 | |
7907 | at::Tensor out_; |
7908 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7909 | at::functionalization::impl::sync(out); |
7910 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7911 | } else { |
7912 | out_ = out; |
7913 | } |
7914 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7915 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
7916 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
7917 | TORCH_INTERNAL_ASSERT(false, |
7918 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7919 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7920 | } else { |
7921 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7922 | at::AutoDispatchSkipFunctionalize guard; |
7923 | at::Tensor tmp_output = at::_ops::sigmoid_out::call(self_, out_); |
7924 |           return out; |
7925 | } |
7926 | } else { |
7927 | at::Tensor tmp_output; |
7928 | { |
7929 | at::AutoDispatchSkipFunctionalize guard; |
7930 | tmp_output = at::_ops::sigmoid::call(self_); |
7931 | } |
7932 | at::functionalization::impl::replace_(out, tmp_output); |
7933 | at::functionalization::impl::commit_update(out); |
7934 | at::functionalization::impl::sync(out); |
7935 | return out; |
7936 | } |
7937 | } |
7938 | |
7939 | at::Tensor & sigmoid_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
7940 | if (true) { |
7941 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7942 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7943 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7944 | auto self_meta = to_meta(self); |
7945 | at::AutoDispatchSkipFunctionalize func_guard; |
7946 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7947 | at::_ops::sigmoid_::call(self_meta); |
7948 | } |
7949 | |
7950 | at::Tensor self_; |
7951 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7952 | at::functionalization::impl::sync(self); |
7953 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7954 | } else { |
7955 | self_ = self; |
7956 | } |
7957 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
7958 | if ((false)) { |
7959 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
7960 | TORCH_INTERNAL_ASSERT(false, |
7961 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7962 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7963 | } else { |
7964 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7965 | at::AutoDispatchSkipFunctionalize guard; |
7966 | at::Tensor tmp_output = at::_ops::sigmoid_::call(self_); |
7967 |           return self; |
7968 | } |
7969 | } else { |
7970 | at::Tensor tmp_output; |
7971 | { |
7972 | at::AutoDispatchSkipFunctionalize guard; |
7973 | tmp_output = at::_ops::sigmoid::call(self_); |
7974 | } |
7975 | at::functionalization::impl::replace_(self, tmp_output); |
7976 | at::functionalization::impl::commit_update(self); |
7977 | at::functionalization::impl::sync(self); |
7978 | return self; |
7979 | } |
7980 | } |
7981 | |
7982 | at::Tensor & sinc_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
7983 | if (false) { |
7984 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7985 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7986 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7987 | auto self_meta = to_meta(self); |
7988 | auto out_meta = to_meta(out); |
7989 | at::AutoDispatchSkipFunctionalize func_guard; |
7990 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7991 | at::_ops::sinc_out::call(self_meta, out_meta); |
7992 | } |
7993 | |
7994 | at::Tensor self_; |
7995 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7996 | at::functionalization::impl::sync(self); |
7997 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7998 | } else { |
7999 | self_ = self; |
8000 | } |
8001 | |
8002 | at::Tensor out_; |
8003 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8004 | at::functionalization::impl::sync(out); |
8005 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8006 | } else { |
8007 | out_ = out; |
8008 | } |
8009 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8010 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
8011 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8012 | TORCH_INTERNAL_ASSERT(false, |
8013 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8014 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8015 | } else { |
8016 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8017 | at::AutoDispatchSkipFunctionalize guard; |
8018 | at::Tensor tmp_output = at::_ops::sinc_out::call(self_, out_); |
8019 |           return out; |
8020 | } |
8021 | } else { |
8022 | at::Tensor tmp_output; |
8023 | { |
8024 | at::AutoDispatchSkipFunctionalize guard; |
8025 | tmp_output = at::_ops::sinc::call(self_); |
8026 | } |
8027 | at::functionalization::impl::replace_(out, tmp_output); |
8028 | at::functionalization::impl::commit_update(out); |
8029 | at::functionalization::impl::sync(out); |
8030 | return out; |
8031 | } |
8032 | } |
8033 | |
8034 | at::Tensor & sinc_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
8035 | if (true) { |
8036 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8037 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8038 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8039 | auto self_meta = to_meta(self); |
8040 | at::AutoDispatchSkipFunctionalize func_guard; |
8041 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8042 | at::_ops::sinc_::call(self_meta); |
8043 | } |
8044 | |
8045 | at::Tensor self_; |
8046 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8047 | at::functionalization::impl::sync(self); |
8048 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8049 | } else { |
8050 | self_ = self; |
8051 | } |
8052 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
8053 | if ((false)) { |
8054 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8055 | TORCH_INTERNAL_ASSERT(false, |
8056 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8057 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8058 | } else { |
8059 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8060 | at::AutoDispatchSkipFunctionalize guard; |
8061 | at::Tensor tmp_output = at::_ops::sinc_::call(self_); |
8062 |           return self; |
8063 | } |
8064 | } else { |
8065 | at::Tensor tmp_output; |
8066 | { |
8067 | at::AutoDispatchSkipFunctionalize guard; |
8068 | tmp_output = at::_ops::sinc::call(self_); |
8069 | } |
8070 | at::functionalization::impl::replace_(self, tmp_output); |
8071 | at::functionalization::impl::commit_update(self); |
8072 | at::functionalization::impl::sync(self); |
8073 | return self; |
8074 | } |
8075 | } |
8076 | |
8077 | at::Tensor & sinh_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
8078 | if (false) { |
8079 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8080 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8081 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8082 | auto self_meta = to_meta(self); |
8083 | auto out_meta = to_meta(out); |
8084 | at::AutoDispatchSkipFunctionalize func_guard; |
8085 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8086 | at::_ops::sinh_out::call(self_meta, out_meta); |
8087 | } |
8088 | |
8089 | at::Tensor self_; |
8090 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8091 | at::functionalization::impl::sync(self); |
8092 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8093 | } else { |
8094 | self_ = self; |
8095 | } |
8096 | |
8097 | at::Tensor out_; |
8098 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8099 | at::functionalization::impl::sync(out); |
8100 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8101 | } else { |
8102 | out_ = out; |
8103 | } |
8104 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8105 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
8106 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8107 | TORCH_INTERNAL_ASSERT(false, |
8108 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8109 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8110 | } else { |
8111 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8112 | at::AutoDispatchSkipFunctionalize guard; |
8113 | at::Tensor tmp_output = at::_ops::sinh_out::call(self_, out_); |
8114 |           return out; |
8115 | } |
8116 | } else { |
8117 | at::Tensor tmp_output; |
8118 | { |
8119 | at::AutoDispatchSkipFunctionalize guard; |
8120 | tmp_output = at::_ops::sinh::call(self_); |
8121 | } |
8122 | at::functionalization::impl::replace_(out, tmp_output); |
8123 | at::functionalization::impl::commit_update(out); |
8124 | at::functionalization::impl::sync(out); |
8125 | return out; |
8126 | } |
8127 | } |
8128 | |
8129 | at::Tensor & sinh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
8130 | if (true) { |
8131 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8132 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8133 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8134 | auto self_meta = to_meta(self); |
8135 | at::AutoDispatchSkipFunctionalize func_guard; |
8136 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8137 | at::_ops::sinh_::call(self_meta); |
8138 | } |
8139 | |
8140 | at::Tensor self_; |
8141 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8142 | at::functionalization::impl::sync(self); |
8143 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8144 | } else { |
8145 | self_ = self; |
8146 | } |
8147 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
8148 | if ((false)) { |
8149 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8150 | TORCH_INTERNAL_ASSERT(false, |
8151 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8152 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8153 | } else { |
8154 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8155 | at::AutoDispatchSkipFunctionalize guard; |
8156 | at::Tensor tmp_output = at::_ops::sinh_::call(self_); |
8157 |           return self; |
8158 | } |
8159 | } else { |
8160 | at::Tensor tmp_output; |
8161 | { |
8162 | at::AutoDispatchSkipFunctionalize guard; |
8163 | tmp_output = at::_ops::sinh::call(self_); |
8164 | } |
8165 | at::functionalization::impl::replace_(self, tmp_output); |
8166 | at::functionalization::impl::commit_update(self); |
8167 | at::functionalization::impl::sync(self); |
8168 | return self; |
8169 | } |
8170 | } |
8171 | |
8172 | at::Tensor & slice_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step, at::Tensor & out) { |
8173 | if (false) { |
8174 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8175 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8176 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8177 | auto grad_output_meta = to_meta(grad_output); |
8178 | auto out_meta = to_meta(out); |
8179 | at::AutoDispatchSkipFunctionalize func_guard; |
8180 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8181 | at::_ops::slice_backward_out::call(grad_output_meta, input_sizes, dim, start, end, step, out_meta); |
8182 | } |
8183 | |
8184 | at::Tensor grad_output_; |
8185 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
8186 | at::functionalization::impl::sync(grad_output); |
8187 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
8188 | } else { |
8189 | grad_output_ = grad_output; |
8190 | } |
8191 | |
8192 | at::Tensor out_; |
8193 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8194 | at::functionalization::impl::sync(out); |
8195 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8196 | } else { |
8197 | out_ = out; |
8198 | } |
8199 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8200 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output))) { |
8201 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8202 | TORCH_INTERNAL_ASSERT(false, |
8203 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8204 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8205 | } else { |
8206 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8207 | at::AutoDispatchSkipFunctionalize guard; |
8208 | at::Tensor tmp_output = at::_ops::slice_backward_out::call(grad_output_, input_sizes, dim, start, end, step, out_); |
8209 |           return out; |
8210 | } |
8211 | } else { |
8212 | at::Tensor tmp_output; |
8213 | { |
8214 | at::AutoDispatchSkipFunctionalize guard; |
8215 | tmp_output = at::_ops::slice_backward::call(grad_output_, input_sizes, dim, start, end, step); |
8216 | } |
8217 | at::functionalization::impl::replace_(out, tmp_output); |
8218 | at::functionalization::impl::commit_update(out); |
8219 | at::functionalization::impl::sync(out); |
8220 | return out; |
8221 | } |
8222 | } |
8223 | |
8224 | at::Tensor & as_strided_scatter_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset, at::Tensor & out) { |
8225 | if (false) { |
8226 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8227 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8228 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8229 | auto self_meta = to_meta(self); |
8230 | auto src_meta = to_meta(src); |
8231 | auto out_meta = to_meta(out); |
8232 | at::AutoDispatchSkipFunctionalize func_guard; |
8233 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8234 | at::_ops::as_strided_scatter_out::call(self_meta, src_meta, size, stride, storage_offset, out_meta); |
8235 | } |
8236 | |
8237 | at::Tensor self_; |
8238 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8239 | at::functionalization::impl::sync(self); |
8240 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8241 | } else { |
8242 | self_ = self; |
8243 | } |
8244 | |
8245 | at::Tensor src_; |
8246 | if (at::functionalization::impl::isFunctionalTensor(src)) { |
8247 | at::functionalization::impl::sync(src); |
8248 | src_ = at::functionalization::impl::from_functional_tensor(src); |
8249 | } else { |
8250 | src_ = src; |
8251 | } |
8252 | |
8253 | at::Tensor out_; |
8254 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8255 | at::functionalization::impl::sync(out); |
8256 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8257 | } else { |
8258 | out_ = out; |
8259 | } |
8260 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8261 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(src))) { |
8262 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8263 | TORCH_INTERNAL_ASSERT(false, |
8264 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8265 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8266 | } else { |
8267 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8268 | at::AutoDispatchSkipFunctionalize guard; |
8269 | at::Tensor tmp_output = at::_ops::as_strided_scatter_out::call(self_, src_, size, stride, storage_offset, out_); |
8270 |           return out; |
8271 | } |
8272 | } else { |
8273 | at::Tensor tmp_output; |
8274 | { |
8275 | at::AutoDispatchSkipFunctionalize guard; |
8276 | tmp_output = at::_ops::as_strided_scatter::call(self_, src_, size, stride, storage_offset); |
8277 | } |
8278 | at::functionalization::impl::replace_(out, tmp_output); |
8279 | at::functionalization::impl::commit_update(out); |
8280 | at::functionalization::impl::sync(out); |
8281 | return out; |
8282 | } |
8283 | } |
8284 | |
8285 | ::std::tuple<at::Tensor &,at::Tensor &> std_mean_out_correction_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out0, at::Tensor & out1) { |
8286 | if (false) { |
8287 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8288 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8289 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8290 | auto self_meta = to_meta(self); |
8291 | auto out0_meta = to_meta(out0); |
8292 | auto out1_meta = to_meta(out1); |
8293 | at::AutoDispatchSkipFunctionalize func_guard; |
8294 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8295 | at::_ops::std_mean_correction_out::call(self_meta, dim, correction, keepdim, out0_meta, out1_meta); |
8296 | } |
8297 | |
8298 | at::Tensor self_; |
8299 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8300 | at::functionalization::impl::sync(self); |
8301 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8302 | } else { |
8303 | self_ = self; |
8304 | } |
8305 | |
8306 | at::Tensor out0_; |
8307 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
8308 | at::functionalization::impl::sync(out0); |
8309 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
8310 | } else { |
8311 | out0_ = out0; |
8312 | } |
8313 | |
8314 | at::Tensor out1_; |
8315 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
8316 | at::functionalization::impl::sync(out1); |
8317 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
8318 | } else { |
8319 | out1_ = out1; |
8320 | } |
8321 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) { |
8322 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
8323 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8324 | TORCH_INTERNAL_ASSERT(false, |
8325 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8326 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8327 | } else { |
8328 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8329 | at::AutoDispatchSkipFunctionalize guard; |
8330 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::std_mean_correction_out::call(self_, dim, correction, keepdim, out0_, out1_); |
8331 |           return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
8332 | } |
8333 | } else { |
8334 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
8335 | { |
8336 | at::AutoDispatchSkipFunctionalize guard; |
8337 | tmp_output = at::_ops::std_mean_correction::call(self_, dim, correction, keepdim); |
8338 | } |
8339 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
8340 | at::functionalization::impl::commit_update(out0); |
8341 | at::functionalization::impl::sync(out0); |
8342 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
8343 | at::functionalization::impl::commit_update(out1); |
8344 | at::functionalization::impl::sync(out1); |
8345 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
8346 | } |
8347 | } |
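    // Multi-output out= wrappers (std_mean_out_correction_out above, and the other tuple-returning
    // wrappers below such as unique_dim_out_out and var_mean_out_correction_out) apply the same
    // recipe per output: the functional op returns a tuple, and each element is replace_()'d,
    // commit_update()'d and sync()'d into the corresponding `out` tensor. The functional path is
    // taken only when every output is a functional tensor; otherwise the original out= overload is
    // redispatched unchanged.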
8348 | |
8349 | at::Tensor & _mkldnn_transpose_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out) { |
8350 | if (false) { |
8351 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8352 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8353 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8354 | auto self_meta = to_meta(self); |
8355 | auto out_meta = to_meta(out); |
8356 | at::AutoDispatchSkipFunctionalize func_guard; |
8357 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8358 | at::_ops::_mkldnn_transpose_out::call(self_meta, dim0, dim1, out_meta); |
8359 | } |
8360 | |
8361 | at::Tensor self_; |
8362 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8363 | at::functionalization::impl::sync(self); |
8364 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8365 | } else { |
8366 | self_ = self; |
8367 | } |
8368 | |
8369 | at::Tensor out_; |
8370 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8371 | at::functionalization::impl::sync(out); |
8372 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8373 | } else { |
8374 | out_ = out; |
8375 | } |
8376 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8377 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
8378 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8379 | TORCH_INTERNAL_ASSERT(false, |
8380 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8381 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8382 | } else { |
8383 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8384 | at::AutoDispatchSkipFunctionalize guard; |
8385 | at::Tensor tmp_output = at::_ops::_mkldnn_transpose_out::call(self_, dim0, dim1, out_); |
8386 |           return out; |
8387 | } |
8388 | } else { |
8389 | at::Tensor tmp_output; |
8390 | { |
8391 | at::AutoDispatchSkipFunctionalize guard; |
8392 | tmp_output = at::_ops::_mkldnn_transpose::call(self_, dim0, dim1); |
8393 | } |
8394 | at::functionalization::impl::replace_(out, tmp_output); |
8395 | at::functionalization::impl::commit_update(out); |
8396 | at::functionalization::impl::sync(out); |
8397 | return out; |
8398 | } |
8399 | } |
8400 | |
8401 | at::Tensor & _mkldnn_transpose_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim0, int64_t dim1) { |
8402 | if (true) { |
8403 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8404 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8405 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8406 | auto self_meta = to_meta(self); |
8407 | at::AutoDispatchSkipFunctionalize func_guard; |
8408 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8409 | at::_ops::_mkldnn_transpose_::call(self_meta, dim0, dim1); |
8410 | } |
8411 | |
8412 | at::Tensor self_; |
8413 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8414 | at::functionalization::impl::sync(self); |
8415 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8416 | } else { |
8417 | self_ = self; |
8418 | } |
8419 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
8420 | if ((false)) { |
8421 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8422 | TORCH_INTERNAL_ASSERT(false, |
8423 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8424 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8425 | } else { |
8426 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8427 | at::AutoDispatchSkipFunctionalize guard; |
8428 | at::Tensor tmp_output = at::_ops::_mkldnn_transpose_::call(self_, dim0, dim1); |
8429 |           return self; |
8430 | } |
8431 | } else { |
8432 | at::Tensor tmp_output; |
8433 | { |
8434 | at::AutoDispatchSkipFunctionalize guard; |
8435 | tmp_output = at::_ops::_mkldnn_transpose::call(self_, dim0, dim1); |
8436 | } |
8437 | at::functionalization::impl::replace_(self, tmp_output); |
8438 | at::functionalization::impl::commit_update(self); |
8439 | at::functionalization::impl::sync(self); |
8440 | return self; |
8441 | } |
8442 | } |
8443 | |
8444 | at::Tensor & flip_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out) { |
8445 | if (false) { |
8446 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8447 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8448 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8449 | auto self_meta = to_meta(self); |
8450 | auto out_meta = to_meta(out); |
8451 | at::AutoDispatchSkipFunctionalize func_guard; |
8452 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8453 | at::_ops::flip_out::call(self_meta, dims, out_meta); |
8454 | } |
8455 | |
8456 | at::Tensor self_; |
8457 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8458 | at::functionalization::impl::sync(self); |
8459 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8460 | } else { |
8461 | self_ = self; |
8462 | } |
8463 | |
8464 | at::Tensor out_; |
8465 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8466 | at::functionalization::impl::sync(out); |
8467 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8468 | } else { |
8469 | out_ = out; |
8470 | } |
8471 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8472 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
8473 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8474 | TORCH_INTERNAL_ASSERT(false, |
8475 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8476 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8477 | } else { |
8478 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8479 | at::AutoDispatchSkipFunctionalize guard; |
8480 | at::Tensor tmp_output = at::_ops::flip_out::call(self_, dims, out_); |
8481 |           return out; |
8482 | } |
8483 | } else { |
8484 | at::Tensor tmp_output; |
8485 | { |
8486 | at::AutoDispatchSkipFunctionalize guard; |
8487 | tmp_output = at::_ops::flip::call(self_, dims); |
8488 | } |
8489 | at::functionalization::impl::replace_(out, tmp_output); |
8490 | at::functionalization::impl::commit_update(out); |
8491 | at::functionalization::impl::sync(out); |
8492 | return out; |
8493 | } |
8494 | } |
8495 | |
8496 | at::Tensor & _nested_tensor_from_mask_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & t, const at::Tensor & mask, bool mask_check, at::Tensor & out) { |
8497 | if (false) { |
8498 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8499 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8500 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8501 | auto t_meta = to_meta(t); |
8502 | auto mask_meta = to_meta(mask); |
8503 | auto out_meta = to_meta(out); |
8504 | at::AutoDispatchSkipFunctionalize func_guard; |
8505 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8506 | at::_ops::_nested_tensor_from_mask_out::call(t_meta, mask_meta, mask_check, out_meta); |
8507 | } |
8508 | |
8509 | at::Tensor t_; |
8510 | if (at::functionalization::impl::isFunctionalTensor(t)) { |
8511 | at::functionalization::impl::sync(t); |
8512 | t_ = at::functionalization::impl::from_functional_tensor(t); |
8513 | } else { |
8514 | t_ = t; |
8515 | } |
8516 | |
8517 | at::Tensor mask_; |
8518 | if (at::functionalization::impl::isFunctionalTensor(mask)) { |
8519 | at::functionalization::impl::sync(mask); |
8520 | mask_ = at::functionalization::impl::from_functional_tensor(mask); |
8521 | } else { |
8522 | mask_ = mask; |
8523 | } |
8524 | |
8525 | at::Tensor out_; |
8526 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8527 | at::functionalization::impl::sync(out); |
8528 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8529 | } else { |
8530 | out_ = out; |
8531 | } |
8532 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8533 | if ((false || at::functionalization::impl::isFunctionalTensor(t) || at::functionalization::impl::isFunctionalTensor(mask))) { |
8534 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8535 | TORCH_INTERNAL_ASSERT(false, |
8536 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8537 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8538 | } else { |
8539 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8540 | at::AutoDispatchSkipFunctionalize guard; |
8541 | at::Tensor tmp_output = at::_ops::_nested_tensor_from_mask_out::call(t_, mask_, mask_check, out_); |
8542 |           return out; |
8543 | } |
8544 | } else { |
8545 | at::Tensor tmp_output; |
8546 | { |
8547 | at::AutoDispatchSkipFunctionalize guard; |
8548 | tmp_output = at::_ops::_nested_tensor_from_mask::call(t_, mask_, mask_check); |
8549 | } |
8550 | at::functionalization::impl::replace_(out, tmp_output); |
8551 | at::functionalization::impl::commit_update(out); |
8552 | at::functionalization::impl::sync(out); |
8553 | return out; |
8554 | } |
8555 | } |
8556 | |
8557 | at::Tensor & _nested_from_padded_and_nested_example_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & padded, const at::Tensor & nt_example, at::Tensor & out) { |
8558 | if (false) { |
8559 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8560 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8561 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8562 | auto padded_meta = to_meta(padded); |
8563 | auto nt_example_meta = to_meta(nt_example); |
8564 | auto out_meta = to_meta(out); |
8565 | at::AutoDispatchSkipFunctionalize func_guard; |
8566 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8567 | at::_ops::_nested_from_padded_and_nested_example_out::call(padded_meta, nt_example_meta, out_meta); |
8568 | } |
8569 | |
8570 | at::Tensor padded_; |
8571 | if (at::functionalization::impl::isFunctionalTensor(padded)) { |
8572 | at::functionalization::impl::sync(padded); |
8573 | padded_ = at::functionalization::impl::from_functional_tensor(padded); |
8574 | } else { |
8575 | padded_ = padded; |
8576 | } |
8577 | |
8578 | at::Tensor nt_example_; |
8579 | if (at::functionalization::impl::isFunctionalTensor(nt_example)) { |
8580 | at::functionalization::impl::sync(nt_example); |
8581 | nt_example_ = at::functionalization::impl::from_functional_tensor(nt_example); |
8582 | } else { |
8583 | nt_example_ = nt_example; |
8584 | } |
8585 | |
8586 | at::Tensor out_; |
8587 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8588 | at::functionalization::impl::sync(out); |
8589 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8590 | } else { |
8591 | out_ = out; |
8592 | } |
8593 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8594 | if ((false || at::functionalization::impl::isFunctionalTensor(padded) || at::functionalization::impl::isFunctionalTensor(nt_example))) { |
8595 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8596 | TORCH_INTERNAL_ASSERT(false, |
8597 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8598 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8599 | } else { |
8600 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8601 | at::AutoDispatchSkipFunctionalize guard; |
8602 | at::Tensor tmp_output = at::_ops::_nested_from_padded_and_nested_example_out::call(padded_, nt_example_, out_); |
8603 |           return out; |
8604 | } |
8605 | } else { |
8606 | at::Tensor tmp_output; |
8607 | { |
8608 | at::AutoDispatchSkipFunctionalize guard; |
8609 | tmp_output = at::_ops::_nested_from_padded_and_nested_example::call(padded_, nt_example_); |
8610 | } |
8611 | at::functionalization::impl::replace_(out, tmp_output); |
8612 | at::functionalization::impl::commit_update(out); |
8613 | at::functionalization::impl::sync(out); |
8614 | return out; |
8615 | } |
8616 | } |
8617 | |
8618 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_dim_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
8619 | if (false) { |
8620 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8621 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8622 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8623 | auto self_meta = to_meta(self); |
8624 | auto out0_meta = to_meta(out0); |
8625 | auto out1_meta = to_meta(out1); |
8626 | auto out2_meta = to_meta(out2); |
8627 | at::AutoDispatchSkipFunctionalize func_guard; |
8628 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8629 | at::_ops::unique_dim_out::call(self_meta, dim, sorted, return_inverse, return_counts, out0_meta, out1_meta, out2_meta); |
8630 | } |
8631 | |
8632 | at::Tensor self_; |
8633 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8634 | at::functionalization::impl::sync(self); |
8635 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8636 | } else { |
8637 | self_ = self; |
8638 | } |
8639 | |
8640 | at::Tensor out0_; |
8641 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
8642 | at::functionalization::impl::sync(out0); |
8643 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
8644 | } else { |
8645 | out0_ = out0; |
8646 | } |
8647 | |
8648 | at::Tensor out1_; |
8649 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
8650 | at::functionalization::impl::sync(out1); |
8651 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
8652 | } else { |
8653 | out1_ = out1; |
8654 | } |
8655 | |
8656 | at::Tensor out2_; |
8657 | if (at::functionalization::impl::isFunctionalTensor(out2)) { |
8658 | at::functionalization::impl::sync(out2); |
8659 | out2_ = at::functionalization::impl::from_functional_tensor(out2); |
8660 | } else { |
8661 | out2_ = out2; |
8662 | } |
8663 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) { |
8664 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
8665 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8666 | TORCH_INTERNAL_ASSERT(false, |
8667 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8668 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8669 | } else { |
8670 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8671 | at::AutoDispatchSkipFunctionalize guard; |
8672 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::unique_dim_out::call(self_, dim, sorted, return_inverse, return_counts, out0_, out1_, out2_); |
8673 |           return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
8674 | } |
8675 | } else { |
8676 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output; |
8677 | { |
8678 | at::AutoDispatchSkipFunctionalize guard; |
8679 | tmp_output = at::_ops::unique_dim::call(self_, dim, sorted, return_inverse, return_counts); |
8680 | } |
8681 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
8682 | at::functionalization::impl::commit_update(out0); |
8683 | at::functionalization::impl::sync(out0); |
8684 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
8685 | at::functionalization::impl::commit_update(out1); |
8686 | at::functionalization::impl::sync(out1); |
8687 | at::functionalization::impl::replace_(out2, std::get<2>(tmp_output)); |
8688 | at::functionalization::impl::commit_update(out2); |
8689 | at::functionalization::impl::sync(out2); |
8690 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
8691 | } |
8692 | } |
8693 | |
8694 | at::Tensor & _unsafe_view_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) { |
8695 | if (false) { |
8696 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8697 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8698 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8699 | auto self_meta = to_meta(self); |
8700 | auto out_meta = to_meta(out); |
8701 | at::AutoDispatchSkipFunctionalize func_guard; |
8702 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8703 | at::_ops::_unsafe_view_out::call(self_meta, size, out_meta); |
8704 | } |
8705 | |
8706 | at::Tensor self_; |
8707 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8708 | at::functionalization::impl::sync(self); |
8709 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8710 | } else { |
8711 | self_ = self; |
8712 | } |
8713 | |
8714 | at::Tensor out_; |
8715 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8716 | at::functionalization::impl::sync(out); |
8717 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8718 | } else { |
8719 | out_ = out; |
8720 | } |
8721 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8722 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
8723 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8724 | TORCH_INTERNAL_ASSERT(false, |
8725 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8726 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8727 | } else { |
8728 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8729 | at::AutoDispatchSkipFunctionalize guard; |
8730 | at::Tensor tmp_output = at::_ops::_unsafe_view_out::call(self_, size, out_); |
8731 |           return out; |
8732 | } |
8733 | } else { |
8734 | at::Tensor tmp_output; |
8735 | { |
8736 | at::AutoDispatchSkipFunctionalize guard; |
8737 | tmp_output = at::_ops::_unsafe_view::call(self_, size); |
8738 | } |
8739 | at::functionalization::impl::replace_(out, tmp_output); |
8740 | at::functionalization::impl::commit_update(out); |
8741 | at::functionalization::impl::sync(out); |
8742 | return out; |
8743 | } |
8744 | } |
8745 | |
8746 | ::std::tuple<at::Tensor &,at::Tensor &> var_mean_out_correction_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out0, at::Tensor & out1) { |
8747 | if (false) { |
8748 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8749 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8750 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8751 | auto self_meta = to_meta(self); |
8752 | auto out0_meta = to_meta(out0); |
8753 | auto out1_meta = to_meta(out1); |
8754 | at::AutoDispatchSkipFunctionalize func_guard; |
8755 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8756 | at::_ops::var_mean_correction_out::call(self_meta, dim, correction, keepdim, out0_meta, out1_meta); |
8757 | } |
8758 | |
8759 | at::Tensor self_; |
8760 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8761 | at::functionalization::impl::sync(self); |
8762 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8763 | } else { |
8764 | self_ = self; |
8765 | } |
8766 | |
8767 | at::Tensor out0_; |
8768 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
8769 | at::functionalization::impl::sync(out0); |
8770 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
8771 | } else { |
8772 | out0_ = out0; |
8773 | } |
8774 | |
8775 | at::Tensor out1_; |
8776 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
8777 | at::functionalization::impl::sync(out1); |
8778 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
8779 | } else { |
8780 | out1_ = out1; |
8781 | } |
8782 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) { |
8783 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
8784 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8785 | TORCH_INTERNAL_ASSERT(false, |
8786 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8787 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8788 | } else { |
8789 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8790 | at::AutoDispatchSkipFunctionalize guard; |
8791 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::var_mean_correction_out::call(self_, dim, correction, keepdim, out0_, out1_); |
8792 |           return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
8793 | } |
8794 | } else { |
8795 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
8796 | { |
8797 | at::AutoDispatchSkipFunctionalize guard; |
8798 | tmp_output = at::_ops::var_mean_correction::call(self_, dim, correction, keepdim); |
8799 | } |
8800 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
8801 | at::functionalization::impl::commit_update(out0); |
8802 | at::functionalization::impl::sync(out0); |
8803 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
8804 | at::functionalization::impl::commit_update(out1); |
8805 | at::functionalization::impl::sync(out1); |
8806 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
8807 | } |
8808 | } |
8809 | |
8810 | at::Tensor & zeros_out_names_out(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) { |
8811 | if (false) { |
8812 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8813 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8814 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8815 | auto out_meta = to_meta(out); |
8816 | at::AutoDispatchSkipFunctionalize func_guard; |
8817 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8818 | at::_ops::zeros_names_out::call(size, names, out_meta); |
8819 | } |
8820 | |
8821 | at::Tensor out_; |
8822 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8823 | at::functionalization::impl::sync(out); |
8824 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8825 | } else { |
8826 | out_ = out; |
8827 | } |
8828 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8829 | if ((false)) { |
8830 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8831 | TORCH_INTERNAL_ASSERT(false, |
8832 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8833 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8834 | } else { |
8835 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8836 | at::AutoDispatchSkipFunctionalize guard; |
8837 | at::Tensor tmp_output = at::_ops::zeros_names_out::call(size, names, out_); |
8838 |           return out; |
8839 | } |
8840 | } else { |
8841 | at::Tensor tmp_output; |
8842 | { |
8843 | at::AutoDispatchSkipFunctionalize guard; |
8844 | tmp_output = at::_ops::zeros_names::call(size, names, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
8845 | } |
8846 | at::functionalization::impl::replace_(out, tmp_output); |
8847 | at::functionalization::impl::commit_update(out); |
8848 | at::functionalization::impl::sync(out); |
8849 | return out; |
8850 | } |
8851 | } |
8852 | |
8853 | at::Tensor & zeros_out_out(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::Tensor & out) { |
8854 | if (false) { |
8855 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8856 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8857 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8858 | auto out_meta = to_meta(out); |
8859 | at::AutoDispatchSkipFunctionalize func_guard; |
8860 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8861 | at::_ops::zeros_out::call(size, out_meta); |
8862 | } |
8863 | |
8864 | at::Tensor out_; |
8865 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8866 | at::functionalization::impl::sync(out); |
8867 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8868 | } else { |
8869 | out_ = out; |
8870 | } |
8871 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8872 | if ((false)) { |
8873 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8874 | TORCH_INTERNAL_ASSERT(false, |
8875 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8876 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8877 | } else { |
8878 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8879 | at::AutoDispatchSkipFunctionalize guard; |
8880 | at::Tensor tmp_output = at::_ops::zeros_out::call(size, out_); |
8881 |           return out; |
8882 | } |
8883 | } else { |
8884 | at::Tensor tmp_output; |
8885 | { |
8886 | at::AutoDispatchSkipFunctionalize guard; |
8887 | tmp_output = at::_ops::zeros::call(size, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
8888 | } |
8889 | at::functionalization::impl::replace_(out, tmp_output); |
8890 | at::functionalization::impl::commit_update(out); |
8891 | at::functionalization::impl::sync(out); |
8892 | return out; |
8893 | } |
8894 | } |
8895 | |
8896 | at::Tensor & zeros_like_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { |
8897 | if (false) { |
8898 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8899 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8900 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8901 | auto self_meta = to_meta(self); |
8902 | auto out_meta = to_meta(out); |
8903 | at::AutoDispatchSkipFunctionalize func_guard; |
8904 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8905 | at::_ops::zeros_like_out::call(self_meta, memory_format, out_meta); |
8906 | } |
8907 | |
8908 | at::Tensor self_; |
8909 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8910 | at::functionalization::impl::sync(self); |
8911 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8912 | } else { |
8913 | self_ = self; |
8914 | } |
8915 | |
8916 | at::Tensor out_; |
8917 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8918 | at::functionalization::impl::sync(out); |
8919 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8920 | } else { |
8921 | out_ = out; |
8922 | } |
8923 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8924 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
8925 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8926 | TORCH_INTERNAL_ASSERT(false, |
8927 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8928 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8929 | } else { |
8930 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8931 | at::AutoDispatchSkipFunctionalize guard; |
8932 | at::Tensor tmp_output = at::_ops::zeros_like_out::call(self_, memory_format, out_); |
8933 |           return out; |
8934 | } |
8935 | } else { |
8936 | at::Tensor tmp_output; |
8937 | { |
8938 | at::AutoDispatchSkipFunctionalize guard; |
8939 | tmp_output = at::_ops::zeros_like::call(self_, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt, memory_format); |
8940 | } |
8941 | at::functionalization::impl::replace_(out, tmp_output); |
8942 | at::functionalization::impl::commit_update(out); |
8943 | at::functionalization::impl::sync(out); |
8944 | return out; |
8945 | } |
8946 | } |
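    // Factory-style out= wrappers (the zeros_* variants above) call the corresponding factory op
    // directly: when `out` is a functional tensor, dtype, layout and device are taken from `out`
    // itself and pin_memory is passed as c10::nullopt, and the freshly created tensor is then
    // committed back into `out`'s wrapper.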
8947 | |
8948 | at::Tensor & _sparse_csr_prod_out_dim_dtype_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) { |
8949 | if (false) { |
8950 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8951 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8952 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8953 | auto self_meta = to_meta(self); |
8954 | auto out_meta = to_meta(out); |
8955 | at::AutoDispatchSkipFunctionalize func_guard; |
8956 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8957 | at::_ops::_sparse_csr_prod_dim_dtype_out::call(self_meta, dim, keepdim, dtype, out_meta); |
8958 | } |
8959 | |
8960 | at::Tensor self_; |
8961 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8962 | at::functionalization::impl::sync(self); |
8963 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8964 | } else { |
8965 | self_ = self; |
8966 | } |
8967 | |
8968 | at::Tensor out_; |
8969 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8970 | at::functionalization::impl::sync(out); |
8971 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8972 | } else { |
8973 | out_ = out; |
8974 | } |
8975 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8976 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
8977 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8978 | TORCH_INTERNAL_ASSERT(false, |
8979 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8980 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8981 | } else { |
8982 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8983 | at::AutoDispatchSkipFunctionalize guard; |
8984 | at::Tensor tmp_output = at::_ops::_sparse_csr_prod_dim_dtype_out::call(self_, dim, keepdim, dtype, out_); |
8985 | return out; |
8986 | } |
8987 | } else { |
8988 | at::Tensor tmp_output; |
8989 | { |
8990 | at::AutoDispatchSkipFunctionalize guard; |
8991 | tmp_output = at::_ops::_sparse_csr_prod_dim_dtype::call(self_, dim, keepdim, dtype); |
8992 | } |
8993 | at::functionalization::impl::replace_(out, tmp_output); |
8994 | at::functionalization::impl::commit_update(out); |
8995 | at::functionalization::impl::sync(out); |
8996 | return out; |
8997 | } |
8998 | } |
8999 | |
9000 | at::Tensor & _spdiags_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, c10::optional<at::Layout> layout, at::Tensor & out) { |
9001 | if (false) { |
9002 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9003 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
9004 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
9005 | auto diagonals_meta = to_meta(diagonals); |
9006 | auto offsets_meta = to_meta(offsets); |
9007 | auto out_meta = to_meta(out); |
9008 | at::AutoDispatchSkipFunctionalize func_guard; |
9009 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9010 | at::_ops::_spdiags_out::call(diagonals_meta, offsets_meta, shape, layout, out_meta); |
9011 | } |
9012 | |
9013 | at::Tensor diagonals_; |
9014 | if (at::functionalization::impl::isFunctionalTensor(diagonals)) { |
9015 | at::functionalization::impl::sync(diagonals); |
9016 | diagonals_ = at::functionalization::impl::from_functional_tensor(diagonals); |
9017 | } else { |
9018 | diagonals_ = diagonals; |
9019 | } |
9020 | |
9021 | at::Tensor offsets_; |
9022 | if (at::functionalization::impl::isFunctionalTensor(offsets)) { |
9023 | at::functionalization::impl::sync(offsets); |
9024 | offsets_ = at::functionalization::impl::from_functional_tensor(offsets); |
9025 | } else { |
9026 | offsets_ = offsets; |
9027 | } |
9028 | |
9029 | at::Tensor out_; |
9030 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9031 | at::functionalization::impl::sync(out); |
9032 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9033 | } else { |
9034 | out_ = out; |
9035 | } |
9036 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9037 | if ((false || at::functionalization::impl::isFunctionalTensor(diagonals) || at::functionalization::impl::isFunctionalTensor(offsets))) { |
9038 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
9039 | TORCH_INTERNAL_ASSERT(false, |
9040 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9041 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9042 | } else { |
9043 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9044 | at::AutoDispatchSkipFunctionalize guard; |
9045 | at::Tensor tmp_output = at::_ops::_spdiags_out::call(diagonals_, offsets_, shape, layout, out_); |
9046 | return out; |
9047 | } |
9048 | } else { |
9049 | at::Tensor tmp_output; |
9050 | { |
9051 | at::AutoDispatchSkipFunctionalize guard; |
9052 | tmp_output = at::_ops::_spdiags::call(diagonals_, offsets_, shape, layout); |
9053 | } |
9054 | at::functionalization::impl::replace_(out, tmp_output); |
9055 | at::functionalization::impl::commit_update(out); |
9056 | at::functionalization::impl::sync(out); |
9057 | return out; |
9058 | } |
9059 | } |
9060 | |
9061 | at::Tensor & rsub_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) { |
9062 | if (false) { |
9063 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9064 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
9065 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
9066 | auto self_meta = to_meta(self); |
9067 | auto other_meta = to_meta(other); |
9068 | auto out_meta = to_meta(out); |
9069 | at::AutoDispatchSkipFunctionalize func_guard; |
9070 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9071 | at::_ops::rsub_Tensor_out::call(self_meta, other_meta, alpha, out_meta); |
9072 | } |
9073 | |
9074 | at::Tensor self_; |
9075 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9076 | at::functionalization::impl::sync(self); |
9077 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9078 | } else { |
9079 | self_ = self; |
9080 | } |
9081 | |
9082 | at::Tensor other_; |
9083 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
9084 | at::functionalization::impl::sync(other); |
9085 | other_ = at::functionalization::impl::from_functional_tensor(other); |
9086 | } else { |
9087 | other_ = other; |
9088 | } |
9089 | |
9090 | at::Tensor out_; |
9091 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9092 | at::functionalization::impl::sync(out); |
9093 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9094 | } else { |
9095 | out_ = out; |
9096 | } |
9097 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9098 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
9099 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
9100 | TORCH_INTERNAL_ASSERT(false, |
9101 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9102 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9103 | } else { |
9104 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9105 | at::AutoDispatchSkipFunctionalize guard; |
9106 | at::Tensor tmp_output = at::_ops::rsub_Tensor_out::call(self_, other_, alpha, out_); |
9107 | return out; |
9108 | } |
9109 | } else { |
9110 | at::Tensor tmp_output; |
9111 | { |
9112 | at::AutoDispatchSkipFunctionalize guard; |
9113 | tmp_output = at::_ops::rsub_Tensor::call(self_, other_, alpha); |
9114 | } |
9115 | at::functionalization::impl::replace_(out, tmp_output); |
9116 | at::functionalization::impl::commit_update(out); |
9117 | at::functionalization::impl::sync(out); |
9118 | return out; |
9119 | } |
9120 | } |
9121 | |
9122 | at::Tensor & rsub_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) { |
9123 | if (false) { |
9124 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9125 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
9126 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
9127 | auto self_meta = to_meta(self); |
9128 | auto out_meta = to_meta(out); |
9129 | at::AutoDispatchSkipFunctionalize func_guard; |
9130 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9131 | at::_ops::rsub_Scalar_out::call(self_meta, other, alpha, out_meta); |
9132 | } |
9133 | |
9134 | at::Tensor self_; |
9135 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9136 | at::functionalization::impl::sync(self); |
9137 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9138 | } else { |
9139 | self_ = self; |
9140 | } |
9141 | |
9142 | at::Tensor out_; |
9143 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9144 | at::functionalization::impl::sync(out); |
9145 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9146 | } else { |
9147 | out_ = out; |
9148 | } |
9149 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9150 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
9151 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
9152 | TORCH_INTERNAL_ASSERT(false, |
9153 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9154 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9155 | } else { |
9156 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9157 | at::AutoDispatchSkipFunctionalize guard; |
9158 | at::Tensor tmp_output = at::_ops::rsub_Scalar_out::call(self_, other, alpha, out_); |
9159 | return out; |
9160 | } |
9161 | } else { |
9162 | at::Tensor tmp_output; |
9163 | { |
9164 | at::AutoDispatchSkipFunctionalize guard; |
9165 | tmp_output = at::_ops::rsub_Scalar::call(self_, other, alpha); |
9166 | } |
9167 | at::functionalization::impl::replace_(out, tmp_output); |
9168 | at::functionalization::impl::commit_update(out); |
9169 | at::functionalization::impl::sync(out); |
9170 | return out; |
9171 | } |
9172 | } |
9173 | |
9174 | at::Tensor & _sparse_addmm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { |
9175 | if (false) { |
9176 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9177 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
9178 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
9179 | auto self_meta = to_meta(self); |
9180 | auto mat1_meta = to_meta(mat1); |
9181 | auto mat2_meta = to_meta(mat2); |
9182 | auto out_meta = to_meta(out); |
9183 | at::AutoDispatchSkipFunctionalize func_guard; |
9184 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9185 | at::_ops::_sparse_addmm_out::call(self_meta, mat1_meta, mat2_meta, beta, alpha, out_meta); |
9186 | } |
9187 | |
9188 | at::Tensor self_; |
9189 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9190 | at::functionalization::impl::sync(self); |
9191 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9192 | } else { |
9193 | self_ = self; |
9194 | } |
9195 | |
9196 | at::Tensor mat1_; |
9197 | if (at::functionalization::impl::isFunctionalTensor(mat1)) { |
9198 | at::functionalization::impl::sync(mat1); |
9199 | mat1_ = at::functionalization::impl::from_functional_tensor(mat1); |
9200 | } else { |
9201 | mat1_ = mat1; |
9202 | } |
9203 | |
9204 | at::Tensor mat2_; |
9205 | if (at::functionalization::impl::isFunctionalTensor(mat2)) { |
9206 | at::functionalization::impl::sync(mat2); |
9207 | mat2_ = at::functionalization::impl::from_functional_tensor(mat2); |
9208 | } else { |
9209 | mat2_ = mat2; |
9210 | } |
9211 | |
9212 | at::Tensor out_; |
9213 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9214 | at::functionalization::impl::sync(out); |
9215 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9216 | } else { |
9217 | out_ = out; |
9218 | } |
9219 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9220 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mat1) || at::functionalization::impl::isFunctionalTensor(mat2))) { |
9221 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
9222 | TORCH_INTERNAL_ASSERT(false, |
9223 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9224 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9225 | } else { |
9226 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9227 | at::AutoDispatchSkipFunctionalize guard; |
9228 | at::Tensor tmp_output = at::_ops::_sparse_addmm_out::call(self_, mat1_, mat2_, beta, alpha, out_); |
9229 | return out; |
9230 | } |
9231 | } else { |
9232 | at::Tensor tmp_output; |
9233 | { |
9234 | at::AutoDispatchSkipFunctionalize guard; |
9235 | tmp_output = at::_ops::_sparse_addmm::call(self_, mat1_, mat2_, beta, alpha); |
9236 | } |
9237 | at::functionalization::impl::replace_(out, tmp_output); |
9238 | at::functionalization::impl::commit_update(out); |
9239 | at::functionalization::impl::sync(out); |
9240 | return out; |
9241 | } |
9242 | } |
9243 | |
9244 | at::Tensor & sparse_coo_tensor_out_size_out(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::Tensor & out) { |
9245 | if (false) { |
9246 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9247 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
9248 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
9249 | auto out_meta = to_meta(out); |
9250 | at::AutoDispatchSkipFunctionalize func_guard; |
9251 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9252 | at::_ops::sparse_coo_tensor_size_out::call(size, out_meta); |
9253 | } |
9254 | |
9255 | at::Tensor out_; |
9256 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9257 | at::functionalization::impl::sync(out); |
9258 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9259 | } else { |
9260 | out_ = out; |
9261 | } |
9262 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9263 | if ((false)) { |
9264 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
9265 | TORCH_INTERNAL_ASSERT(false, |
9266 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9267 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9268 | } else { |
9269 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9270 | at::AutoDispatchSkipFunctionalize guard; |
9271 | at::Tensor tmp_output = at::_ops::sparse_coo_tensor_size_out::call(size, out_); |
9272 | return out; |
9273 | } |
9274 | } else { |
9275 | at::Tensor tmp_output; |
9276 | { |
9277 | at::AutoDispatchSkipFunctionalize guard; |
9278 | tmp_output = at::_ops::sparse_coo_tensor_size::call(size, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
9279 | } |
9280 | at::functionalization::impl::replace_(out, tmp_output); |
9281 | at::functionalization::impl::commit_update(out); |
9282 | at::functionalization::impl::sync(out); |
9283 | return out; |
9284 | } |
9285 | } |
9286 | |
9287 | const at::Tensor & sparse_resize_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out) { |
9288 | if (false) { |
9289 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9290 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
9291 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
9292 | auto self_meta = to_meta(self); |
9293 | auto out_meta = to_meta(out); |
9294 | at::AutoDispatchSkipFunctionalize func_guard; |
9295 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9296 | at::_ops::sparse_resize_out::call(self_meta, size, sparse_dim, dense_dim, out_meta); |
9297 | } |
9298 | |
9299 | at::Tensor self_; |
9300 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9301 | at::functionalization::impl::sync(self); |
9302 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9303 | } else { |
9304 | self_ = self; |
9305 | } |
9306 | |
9307 | at::Tensor out_; |
9308 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9309 | at::functionalization::impl::sync(out); |
9310 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9311 | } else { |
9312 | out_ = out; |
9313 | } |
9314 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9315 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
9316 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
9317 | TORCH_INTERNAL_ASSERT(false, |
9318 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9319 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9320 | } else { |
9321 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9322 | at::AutoDispatchSkipFunctionalize guard; |
9323 | at::Tensor tmp_output = at::_ops::sparse_resize_out::call(self_, size, sparse_dim, dense_dim, out_); |
9324 | return out; |
9325 | } |
9326 | } else { |
9327 | at::Tensor tmp_output; |
9328 | { |
9329 | at::AutoDispatchSkipFunctionalize guard; |
9330 | tmp_output = at::_ops::sparse_resize::call(self_, size, sparse_dim, dense_dim); |
9331 | } |
9332 | at::functionalization::impl::replace_(out, tmp_output); |
9333 | at::functionalization::impl::commit_update(out); |
9334 | at::functionalization::impl::sync(out); |
9335 | return out; |
9336 | } |
9337 | } |
9338 | |
9339 | const at::Tensor & sparse_resize_(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) { |
9340 | if (true) { |
9341 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9342 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
9343 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
9344 | auto self_meta = to_meta(self); |
9345 | at::AutoDispatchSkipFunctionalize func_guard; |
9346 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9347 | at::_ops::sparse_resize_::call(self_meta, size, sparse_dim, dense_dim); |
9348 | } |
9349 | |
9350 | at::Tensor self_; |
9351 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9352 | at::functionalization::impl::sync(self); |
9353 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9354 | } else { |
9355 | self_ = self; |
9356 | } |
9357 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
9358 | if ((false)) { |
9359 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
9360 | TORCH_INTERNAL_ASSERT(false, |
9361 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9362 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9363 | } else { |
9364 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9365 | at::AutoDispatchSkipFunctionalize guard; |
9366 | at::Tensor tmp_output = at::_ops::sparse_resize_::call(self_, size, sparse_dim, dense_dim); |
9367 | return self; |
9368 | } |
9369 | } else { |
9370 | at::Tensor tmp_output; |
9371 | { |
9372 | at::AutoDispatchSkipFunctionalize guard; |
9373 | tmp_output = at::_ops::sparse_resize::call(self_, size, sparse_dim, dense_dim); |
9374 | } |
9375 | at::functionalization::impl::replace_(self, tmp_output); |
9376 | at::functionalization::impl::commit_update(self); |
9377 | at::functionalization::impl::sync(self); |
9378 | return self; |
9379 | } |
9380 | } |
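// Note: sparse_resize_ above is the in-place overload of the preceding out= op. For
// in-place ops the meta-tensor pre-check is enabled (the `if (true)` block), since
// in-place ops are expected to accept meta tensors, and the mutation is committed back
// onto `self` instead of a separate `out` argument. Under functionalization, a call
// such as the following (illustrative usage, not generated code)
//
//   t.sparse_resize_(size, sparse_dim, dense_dim);
//
// is rewritten into the pure at::_ops::sparse_resize::call(...) followed by
// replace_()/commit_update()/sync() on t's wrapper.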
9381 | |
9382 | at::Tensor & sparse_mask_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, at::Tensor & out) { |
9383 | if (false) { |
9384 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9385 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
9386 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
9387 | auto self_meta = to_meta(self); |
9388 | auto mask_meta = to_meta(mask); |
9389 | auto out_meta = to_meta(out); |
9390 | at::AutoDispatchSkipFunctionalize func_guard; |
9391 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9392 | at::_ops::sparse_mask_out::call(self_meta, mask_meta, out_meta); |
9393 | } |
9394 | |
9395 | at::Tensor self_; |
9396 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9397 | at::functionalization::impl::sync(self); |
9398 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9399 | } else { |
9400 | self_ = self; |
9401 | } |
9402 | |
9403 | at::Tensor mask_; |
9404 | if (at::functionalization::impl::isFunctionalTensor(mask)) { |
9405 | at::functionalization::impl::sync(mask); |
9406 | mask_ = at::functionalization::impl::from_functional_tensor(mask); |
9407 | } else { |
9408 | mask_ = mask; |
9409 | } |
9410 | |
9411 | at::Tensor out_; |
9412 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9413 | at::functionalization::impl::sync(out); |
9414 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9415 | } else { |
9416 | out_ = out; |
9417 | } |
9418 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9419 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mask))) { |
9420 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
9421 | TORCH_INTERNAL_ASSERT(false, |
9422 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9423 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9424 | } else { |
9425 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9426 | at::AutoDispatchSkipFunctionalize guard; |
9427 | at::Tensor tmp_output = at::_ops::sparse_mask_out::call(self_, mask_, out_); |
9428 | return out; |
9429 | } |
9430 | } else { |
9431 | at::Tensor tmp_output; |
9432 | { |
9433 | at::AutoDispatchSkipFunctionalize guard; |
9434 | tmp_output = at::_ops::sparse_mask::call(self_, mask_); |
9435 | } |
9436 | at::functionalization::impl::replace_(out, tmp_output); |
9437 | at::functionalization::impl::commit_update(out); |
9438 | at::functionalization::impl::sync(out); |
9439 | return out; |
9440 | } |
9441 | } |
9442 | |
9443 | at::Tensor & _coalesce_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
9444 | if (false) { |
9445 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9446 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
9447 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
9448 | auto self_meta = to_meta(self); |
9449 | auto out_meta = to_meta(out); |
9450 | at::AutoDispatchSkipFunctionalize func_guard; |
9451 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9452 | at::_ops::_coalesce_out::call(self_meta, out_meta); |
9453 | } |
9454 | |
9455 | at::Tensor self_; |
9456 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9457 | at::functionalization::impl::sync(self); |
9458 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9459 | } else { |
9460 | self_ = self; |
9461 | } |
9462 | |
9463 | at::Tensor out_; |
9464 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9465 | at::functionalization::impl::sync(out); |
9466 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9467 | } else { |
9468 | out_ = out; |
9469 | } |
9470 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9471 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
9472 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
9473 | TORCH_INTERNAL_ASSERT(false, |
9474 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9475 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9476 | } else { |
9477 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9478 | at::AutoDispatchSkipFunctionalize guard; |
9479 | at::Tensor tmp_output = at::_ops::_coalesce_out::call(self_, out_); |
9480 | return out; |
9481 | } |
9482 | } else { |
9483 | at::Tensor tmp_output; |
9484 | { |
9485 | at::AutoDispatchSkipFunctionalize guard; |
9486 | tmp_output = at::_ops::_coalesce::call(self_); |
9487 | } |
9488 | at::functionalization::impl::replace_(out, tmp_output); |
9489 | at::functionalization::impl::commit_update(out); |
9490 | at::functionalization::impl::sync(out); |
9491 | return out; |
9492 | } |
9493 | } |
9494 | |
9495 | at::Tensor & dequantize_out_self_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
9496 | if (false) { |
9497 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9498 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
9499 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
9500 | auto self_meta = to_meta(self); |
9501 | auto out_meta = to_meta(out); |
9502 | at::AutoDispatchSkipFunctionalize func_guard; |
9503 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9504 | at::_ops::dequantize_self_out::call(self_meta, out_meta); |
9505 | } |
9506 | |
9507 | at::Tensor self_; |
9508 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9509 | at::functionalization::impl::sync(self); |
9510 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9511 | } else { |
9512 | self_ = self; |
9513 | } |
9514 | |
9515 | at::Tensor out_; |
9516 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9517 | at::functionalization::impl::sync(out); |
9518 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9519 | } else { |
9520 | out_ = out; |
9521 | } |
9522 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9523 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
9524 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
9525 | TORCH_INTERNAL_ASSERT(false, |
9526 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9527 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9528 | } else { |
9529 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9530 | at::AutoDispatchSkipFunctionalize guard; |
9531 | at::Tensor tmp_output = at::_ops::dequantize_self_out::call(self_, out_); |
9532 | return out; |
9533 | } |
9534 | } else { |
9535 | at::Tensor tmp_output; |
9536 | { |
9537 | at::AutoDispatchSkipFunctionalize guard; |
9538 | tmp_output = at::_ops::dequantize_self::call(self_); |
9539 | } |
9540 | at::functionalization::impl::replace_(out, tmp_output); |
9541 | at::functionalization::impl::commit_update(out); |
9542 | at::functionalization::impl::sync(out); |
9543 | return out; |
9544 | } |
9545 | } |
9546 | |
9547 | void dequantize_out_tensors_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::TensorList out) { |
9548 | if (false) { |
9549 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9550 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
9551 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
9552 | auto tensors_meta = to_meta(tensors); |
9553 | auto out_meta = to_meta(out); |
9554 | at::AutoDispatchSkipFunctionalize func_guard; |
9555 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9556 | at::_ops::dequantize_tensors_out::call(tensors_meta, out_meta); |
9557 | } |
9558 | |
9559 | ::std::vector<at::Tensor> tensors_; |
9560 | if (at::functionalization::impl::isFunctionalTensor(tensors)) { |
9561 | at::functionalization::impl::sync(tensors); |
9562 | tensors_ = at::functionalization::impl::from_functional_tensor(tensors); |
9563 | } else { |
9564 | tensors_ = tensors.vec(); |
9565 | } |
9566 | |
9567 | ::std::vector<at::Tensor> out_; |
9568 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9569 | at::functionalization::impl::sync(out); |
9570 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9571 | } else { |
9572 | out_ = out.vec(); |
9573 | } |
9574 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9575 | if ((false || at::functionalization::impl::isFunctionalTensor(tensors))) { |
9576 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
9577 | TORCH_INTERNAL_ASSERT(false, |
9578 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9579 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9580 | } else { |
9581 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9582 | at::AutoDispatchSkipFunctionalize guard; |
9583 | at::_ops::dequantize_tensors_out::call(tensors_, out_); |
9585 | } |
9586 | } else { |
9587 | ::std::vector<at::Tensor> tmp_output; |
9588 | { |
9589 | at::AutoDispatchSkipFunctionalize guard; |
9590 | tmp_output = at::_ops::dequantize_tensors::call(tensors_); |
9591 | } |
9592 | at::functionalization::impl::replace_(out, tmp_output); |
9593 | at::functionalization::impl::commit_update(out); |
9594 | at::functionalization::impl::sync(out); |
9595 | |
9596 | } |
9597 | } |
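// Note: dequantize.tensors_out takes and mutates a TensorList, so the wrapper above
// unwraps both lists into ::std::vector<at::Tensor>, returns void, and relies on the
// TensorList overloads of replace_()/commit_update()/sync() to commit the result back
// into each element of `out`.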
9598 | |
9599 | at::Tensor & q_per_channel_zero_points_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
9600 | if (false) { |
9601 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9602 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
9603 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
9604 | auto self_meta = to_meta(self); |
9605 | auto out_meta = to_meta(out); |
9606 | at::AutoDispatchSkipFunctionalize func_guard; |
9607 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9608 | at::_ops::q_per_channel_zero_points_out::call(self_meta, out_meta); |
9609 | } |
9610 | |
9611 | at::Tensor self_; |
9612 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9613 | at::functionalization::impl::sync(self); |
9614 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9615 | } else { |
9616 | self_ = self; |
9617 | } |
9618 | |
9619 | at::Tensor out_; |
9620 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9621 | at::functionalization::impl::sync(out); |
9622 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9623 | } else { |
9624 | out_ = out; |
9625 | } |
9626 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9627 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
9628 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
9629 | TORCH_INTERNAL_ASSERT(false, |
9630 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9631 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9632 | } else { |
9633 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9634 | at::AutoDispatchSkipFunctionalize guard; |
9635 | at::Tensor tmp_output = at::_ops::q_per_channel_zero_points_out::call(self_, out_); |
9636 | return out; |
9637 | } |
9638 | } else { |
9639 | at::Tensor tmp_output; |
9640 | { |
9641 | at::AutoDispatchSkipFunctionalize guard; |
9642 | tmp_output = at::_ops::q_per_channel_zero_points::call(self_); |
9643 | } |
9644 | at::functionalization::impl::replace_(out, tmp_output); |
9645 | at::functionalization::impl::commit_update(out); |
9646 | at::functionalization::impl::sync(out); |
9647 | return out; |
9648 | } |
9649 | } |
9650 | |
9651 | at::Tensor & _fake_quantize_learnable_per_channel_affine_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor, at::Tensor & out) { |
9652 | if (false) { |
9653 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9654 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
9655 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
9656 | auto self_meta = to_meta(self); |
9657 | auto scale_meta = to_meta(scale); |
9658 | auto zero_point_meta = to_meta(zero_point); |
9659 | auto out_meta = to_meta(out); |
9660 | at::AutoDispatchSkipFunctionalize func_guard; |
9661 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9662 | at::_ops::_fake_quantize_learnable_per_channel_affine_out::call(self_meta, scale_meta, zero_point_meta, axis, quant_min, quant_max, grad_factor, out_meta); |
9663 | } |
9664 | |
9665 | at::Tensor self_; |
9666 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9667 | at::functionalization::impl::sync(self); |
9668 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9669 | } else { |
9670 | self_ = self; |
9671 | } |
9672 | |
9673 | at::Tensor scale_; |
9674 | if (at::functionalization::impl::isFunctionalTensor(scale)) { |
9675 | at::functionalization::impl::sync(scale); |
9676 | scale_ = at::functionalization::impl::from_functional_tensor(scale); |
9677 | } else { |
9678 | scale_ = scale; |
9679 | } |
9680 | |
9681 | at::Tensor zero_point_; |
9682 | if (at::functionalization::impl::isFunctionalTensor(zero_point)) { |
9683 | at::functionalization::impl::sync(zero_point); |
9684 | zero_point_ = at::functionalization::impl::from_functional_tensor(zero_point); |
9685 | } else { |
9686 | zero_point_ = zero_point; |
9687 | } |
9688 | |
9689 | at::Tensor out_; |
9690 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9691 | at::functionalization::impl::sync(out); |
9692 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9693 | } else { |
9694 | out_ = out; |
9695 | } |
9696 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9697 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(scale) || at::functionalization::impl::isFunctionalTensor(zero_point))) { |
9698 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
9699 | TORCH_INTERNAL_ASSERT(false, |
9700 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9701 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9702 | } else { |
9703 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9704 | at::AutoDispatchSkipFunctionalize guard; |
9705 | at::Tensor tmp_output = at::_ops::_fake_quantize_learnable_per_channel_affine_out::call(self_, scale_, zero_point_, axis, quant_min, quant_max, grad_factor, out_); |
9706 | return out; |
9707 | } |
9708 | } else { |
9709 | at::Tensor tmp_output; |
9710 | { |
9711 | at::AutoDispatchSkipFunctionalize guard; |
9712 | tmp_output = at::_ops::_fake_quantize_learnable_per_channel_affine::call(self_, scale_, zero_point_, axis, quant_min, quant_max, grad_factor); |
9713 | } |
9714 | at::functionalization::impl::replace_(out, tmp_output); |
9715 | at::functionalization::impl::commit_update(out); |
9716 | at::functionalization::impl::sync(out); |
9717 | return out; |
9718 | } |
9719 | } |
9720 | |
9721 | ::std::tuple<at::Tensor &,at::Tensor &> _fused_moving_avg_obs_fq_helper_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant, at::Tensor & out0, at::Tensor & out1) { |
9722 | if (false) { |
9723 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9724 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
9725 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
9726 | auto self_meta = to_meta(self); |
9727 | auto observer_on_meta = to_meta(observer_on); |
9728 | auto fake_quant_on_meta = to_meta(fake_quant_on); |
9729 | auto running_min_meta = to_meta(running_min); |
9730 | auto running_max_meta = to_meta(running_max); |
9731 | auto scale_meta = to_meta(scale); |
9732 | auto zero_point_meta = to_meta(zero_point); |
9733 | auto out0_meta = to_meta(out0); |
9734 | auto out1_meta = to_meta(out1); |
9735 | at::AutoDispatchSkipFunctionalize func_guard; |
9736 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9737 | at::_ops::_fused_moving_avg_obs_fq_helper_out::call(self_meta, observer_on_meta, fake_quant_on_meta, running_min_meta, running_max_meta, scale_meta, zero_point_meta, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant, out0_meta, out1_meta); |
9738 | } |
9739 | |
9740 | at::Tensor self_; |
9741 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9742 | at::functionalization::impl::sync(self); |
9743 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9744 | } else { |
9745 | self_ = self; |
9746 | } |
9747 | |
9748 | at::Tensor observer_on_; |
9749 | if (at::functionalization::impl::isFunctionalTensor(observer_on)) { |
9750 | at::functionalization::impl::sync(observer_on); |
9751 | observer_on_ = at::functionalization::impl::from_functional_tensor(observer_on); |
9752 | } else { |
9753 | observer_on_ = observer_on; |
9754 | } |
9755 | |
9756 | at::Tensor fake_quant_on_; |
9757 | if (at::functionalization::impl::isFunctionalTensor(fake_quant_on)) { |
9758 | at::functionalization::impl::sync(fake_quant_on); |
9759 | fake_quant_on_ = at::functionalization::impl::from_functional_tensor(fake_quant_on); |
9760 | } else { |
9761 | fake_quant_on_ = fake_quant_on; |
9762 | } |
9763 | |
9764 | at::Tensor running_min_; |
9765 | if (at::functionalization::impl::isFunctionalTensor(running_min)) { |
9766 | at::functionalization::impl::sync(running_min); |
9767 | running_min_ = at::functionalization::impl::from_functional_tensor(running_min); |
9768 | } else { |
9769 | running_min_ = running_min; |
9770 | } |
9771 | |
9772 | at::Tensor running_max_; |
9773 | if (at::functionalization::impl::isFunctionalTensor(running_max)) { |
9774 | at::functionalization::impl::sync(running_max); |
9775 | running_max_ = at::functionalization::impl::from_functional_tensor(running_max); |
9776 | } else { |
9777 | running_max_ = running_max; |
9778 | } |
9779 | |
9780 | at::Tensor scale_; |
9781 | if (at::functionalization::impl::isFunctionalTensor(scale)) { |
9782 | at::functionalization::impl::sync(scale); |
9783 | scale_ = at::functionalization::impl::from_functional_tensor(scale); |
9784 | } else { |
9785 | scale_ = scale; |
9786 | } |
9787 | |
9788 | at::Tensor zero_point_; |
9789 | if (at::functionalization::impl::isFunctionalTensor(zero_point)) { |
9790 | at::functionalization::impl::sync(zero_point); |
9791 | zero_point_ = at::functionalization::impl::from_functional_tensor(zero_point); |
9792 | } else { |
9793 | zero_point_ = zero_point; |
9794 | } |
9795 | |
9796 | at::Tensor out0_; |
9797 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
9798 | at::functionalization::impl::sync(out0); |
9799 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
9800 | } else { |
9801 | out0_ = out0; |
9802 | } |
9803 | |
9804 | at::Tensor out1_; |
9805 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
9806 | at::functionalization::impl::sync(out1); |
9807 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
9808 | } else { |
9809 | out1_ = out1; |
9810 | } |
9811 | if (!(true && at::functionalization::impl::isFunctionalTensor(running_min) && at::functionalization::impl::isFunctionalTensor(running_max) && at::functionalization::impl::isFunctionalTensor(scale) && at::functionalization::impl::isFunctionalTensor(zero_point) && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) { |
9812 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(observer_on) || at::functionalization::impl::isFunctionalTensor(fake_quant_on))) { |
9813 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
9814 | TORCH_INTERNAL_ASSERT(false, |
9815 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9816 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9817 | } else { |
9818 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9819 | at::AutoDispatchSkipFunctionalize guard; |
9820 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_fused_moving_avg_obs_fq_helper_out::call(self_, observer_on_, fake_quant_on_, running_min_, running_max_, scale_, zero_point_, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant, out0_, out1_); |
9821 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
9822 | } |
9823 | } else { |
9824 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output; |
9825 | { |
9826 | at::AutoDispatchSkipFunctionalize guard; |
9827 | tmp_output = at::_ops::_fused_moving_avg_obs_fq_helper_functional::call(self_, observer_on_, fake_quant_on_, running_min_, running_max_, scale_, zero_point_, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant); |
9828 | } |
9829 | at::functionalization::impl::replace_(running_min, std::get<0>(tmp_output)); |
9830 | at::functionalization::impl::commit_update(running_min); |
9831 | at::functionalization::impl::sync(running_min); |
9832 | at::functionalization::impl::replace_(running_max, std::get<1>(tmp_output)); |
9833 | at::functionalization::impl::commit_update(running_max); |
9834 | at::functionalization::impl::sync(running_max); |
9835 | at::functionalization::impl::replace_(scale, std::get<2>(tmp_output)); |
9836 | at::functionalization::impl::commit_update(scale); |
9837 | at::functionalization::impl::sync(scale); |
9838 | at::functionalization::impl::replace_(zero_point, std::get<3>(tmp_output)); |
9839 | at::functionalization::impl::commit_update(zero_point); |
9840 | at::functionalization::impl::sync(zero_point); |
9841 | at::functionalization::impl::replace_(out0, std::get<4>(tmp_output)); |
9842 | at::functionalization::impl::commit_update(out0); |
9843 | at::functionalization::impl::sync(out0); |
9844 | at::functionalization::impl::replace_(out1, std::get<5>(tmp_output)); |
9845 | at::functionalization::impl::commit_update(out1); |
9846 | at::functionalization::impl::sync(out1); |
9847 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
9848 | } |
9849 | } |
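// Note: _fused_moving_avg_obs_fq_helper.out mutates several of its inputs, so the
// functionalized branch routes through _fused_moving_avg_obs_fq_helper_functional and
// maps its six outputs back onto the mutated arguments, roughly:
//
//   // tmp_output index -> destination
//   //   0 -> running_min, 1 -> running_max, 2 -> scale, 3 -> zero_point, 4 -> out0, 5 -> out1
//
// with each destination receiving the usual replace_()/commit_update()/sync() sequence.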
9850 | |
9851 | ::std::tuple<at::Tensor,at::Tensor> _fused_moving_avg_obs_fq_helper(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) { |
9852 | if (false) { |
9853 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9854 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
9855 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
9856 | auto self_meta = to_meta(self); |
9857 | auto observer_on_meta = to_meta(observer_on); |
9858 | auto fake_quant_on_meta = to_meta(fake_quant_on); |
9859 | auto running_min_meta = to_meta(running_min); |
9860 | auto running_max_meta = to_meta(running_max); |
9861 | auto scale_meta = to_meta(scale); |
9862 | auto zero_point_meta = to_meta(zero_point); |
9863 | at::AutoDispatchSkipFunctionalize func_guard; |
9864 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9865 | at::_ops::_fused_moving_avg_obs_fq_helper::call(self_meta, observer_on_meta, fake_quant_on_meta, running_min_meta, running_max_meta, scale_meta, zero_point_meta, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant); |
9866 | } |
9867 | |
9868 | at::Tensor self_; |
9869 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9870 | at::functionalization::impl::sync(self); |
9871 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9872 | } else { |
9873 | self_ = self; |
9874 | } |
9875 | |
9876 | at::Tensor observer_on_; |
9877 | if (at::functionalization::impl::isFunctionalTensor(observer_on)) { |
9878 | at::functionalization::impl::sync(observer_on); |
9879 | observer_on_ = at::functionalization::impl::from_functional_tensor(observer_on); |
9880 | } else { |
9881 | observer_on_ = observer_on; |
9882 | } |
9883 | |
9884 | at::Tensor fake_quant_on_; |
9885 | if (at::functionalization::impl::isFunctionalTensor(fake_quant_on)) { |
9886 | at::functionalization::impl::sync(fake_quant_on); |
9887 | fake_quant_on_ = at::functionalization::impl::from_functional_tensor(fake_quant_on); |
9888 | } else { |
9889 | fake_quant_on_ = fake_quant_on; |
9890 | } |
9891 | |
9892 | at::Tensor running_min_; |
9893 | if (at::functionalization::impl::isFunctionalTensor(running_min)) { |
9894 | at::functionalization::impl::sync(running_min); |
9895 | running_min_ = at::functionalization::impl::from_functional_tensor(running_min); |
9896 | } else { |
9897 | running_min_ = running_min; |
9898 | } |
9899 | |
9900 | at::Tensor running_max_; |
9901 | if (at::functionalization::impl::isFunctionalTensor(running_max)) { |
9902 | at::functionalization::impl::sync(running_max); |
9903 | running_max_ = at::functionalization::impl::from_functional_tensor(running_max); |
9904 | } else { |
9905 | running_max_ = running_max; |
9906 | } |
9907 | |
9908 | at::Tensor scale_; |
9909 | if (at::functionalization::impl::isFunctionalTensor(scale)) { |
9910 | at::functionalization::impl::sync(scale); |
9911 | scale_ = at::functionalization::impl::from_functional_tensor(scale); |
9912 | } else { |
9913 | scale_ = scale; |
9914 | } |
9915 | |
9916 | at::Tensor zero_point_; |
9917 | if (at::functionalization::impl::isFunctionalTensor(zero_point)) { |
9918 | at::functionalization::impl::sync(zero_point); |
9919 | zero_point_ = at::functionalization::impl::from_functional_tensor(zero_point); |
9920 | } else { |
9921 | zero_point_ = zero_point; |
9922 | } |
9923 | if (!(true && at::functionalization::impl::isFunctionalTensor(running_min) && at::functionalization::impl::isFunctionalTensor(running_max) && at::functionalization::impl::isFunctionalTensor(scale) && at::functionalization::impl::isFunctionalTensor(zero_point))) { |
9924 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(observer_on) || at::functionalization::impl::isFunctionalTensor(fake_quant_on))) { |
9925 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
9926 | TORCH_INTERNAL_ASSERT(false, |
9927 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9928 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9929 | } else { |
9930 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9931 | at::AutoDispatchSkipFunctionalize guard; |
9932 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_fused_moving_avg_obs_fq_helper::call(self_, observer_on_, fake_quant_on_, running_min_, running_max_, scale_, zero_point_, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant); |
9933 | return ::std::tuple<at::Tensor,at::Tensor>(std::get<0>(tmp_output), std::get<1>(tmp_output)); |
9934 | } |
9935 | } else { |
9936 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output; |
9937 | { |
9938 | at::AutoDispatchSkipFunctionalize guard; |
9939 | tmp_output = at::_ops::_fused_moving_avg_obs_fq_helper_functional::call(self_, observer_on_, fake_quant_on_, running_min_, running_max_, scale_, zero_point_, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant); |
9940 | } |
9941 | auto output_0 = at::functionalization::impl::to_functional_tensor(std::get<0>(tmp_output)); |
9942 | auto output_1 = at::functionalization::impl::to_functional_tensor(std::get<1>(tmp_output)); |
9943 | at::functionalization::impl::replace_(running_min, std::get<2>(tmp_output)); |
9944 | at::functionalization::impl::commit_update(running_min); |
9945 | at::functionalization::impl::sync(running_min); |
9946 | at::functionalization::impl::replace_(running_max, std::get<3>(tmp_output)); |
9947 | at::functionalization::impl::commit_update(running_max); |
9948 | at::functionalization::impl::sync(running_max); |
9949 | at::functionalization::impl::replace_(scale, std::get<4>(tmp_output)); |
9950 | at::functionalization::impl::commit_update(scale); |
9951 | at::functionalization::impl::sync(scale); |
9952 | at::functionalization::impl::replace_(zero_point, std::get<5>(tmp_output)); |
9953 | at::functionalization::impl::commit_update(zero_point); |
9954 | at::functionalization::impl::sync(zero_point); |
9955 | return ::std::tuple<at::Tensor,at::Tensor>(output_0, output_1); |
9956 | } |
9957 | } |
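// Note: the non-out overload above differs from the .out wrapper only in how results
// are surfaced: the _functional op's first two outputs are wrapped into fresh
// functional tensors via to_functional_tensor(), while outputs 2-5 are committed back
// into the mutated arguments (running_min, running_max, scale, zero_point).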
9958 | |
9959 | at::Tensor & _to_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { |
9960 | if (false) { |
9961 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9962 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
9963 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
9964 | auto self_meta = to_meta(self); |
9965 | auto out_meta = to_meta(out); |
9966 | at::AutoDispatchSkipFunctionalize func_guard; |
9967 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9968 | at::_ops::_to_copy_out::call(self_meta, non_blocking, memory_format, out_meta); |
9969 | } |
9970 | |
9971 | at::Tensor self_; |
9972 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9973 | at::functionalization::impl::sync(self); |
9974 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9975 | } else { |
9976 | self_ = self; |
9977 | } |
9978 | |
9979 | at::Tensor out_; |
9980 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9981 | at::functionalization::impl::sync(out); |
9982 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9983 | } else { |
9984 | out_ = out; |
9985 | } |
9986 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9987 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
9988 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
9989 | TORCH_INTERNAL_ASSERT(false, |
9990 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9991 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9992 | } else { |
9993 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9994 | at::AutoDispatchSkipFunctionalize guard; |
9995 | at::Tensor tmp_output = at::_ops::_to_copy_out::call(self_, non_blocking, memory_format, out_); |
9996 | return out; |
9997 | } |
9998 | } else { |
9999 | at::Tensor tmp_output; |
10000 | { |
10001 | at::AutoDispatchSkipFunctionalize guard; |
10002 | tmp_output = at::_ops::_to_copy::call(self_, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt, non_blocking, memory_format); |
10003 | } |
10004 | at::functionalization::impl::replace_(out, tmp_output); |
10005 | at::functionalization::impl::commit_update(out); |
10006 | at::functionalization::impl::sync(out); |
10007 | return out; |
10008 | } |
10009 | } |
10010 | |
10011 | ::std::tuple<at::Tensor &,at::Tensor &> _thnn_fused_gru_cell_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, at::Tensor & out0, at::Tensor & out1) { |
10012 | if (false) { |
10013 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10014 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10015 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10016 | auto input_gates_meta = to_meta(input_gates); |
10017 | auto hidden_gates_meta = to_meta(hidden_gates); |
10018 | auto hx_meta = to_meta(hx); |
10019 | auto input_bias_meta = to_meta(input_bias); |
10020 | auto hidden_bias_meta = to_meta(hidden_bias); |
10021 | auto out0_meta = to_meta(out0); |
10022 | auto out1_meta = to_meta(out1); |
10023 | at::AutoDispatchSkipFunctionalize func_guard; |
10024 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10025 | at::_ops::_thnn_fused_gru_cell_out::call(input_gates_meta, hidden_gates_meta, hx_meta, input_bias_meta, hidden_bias_meta, out0_meta, out1_meta); |
10026 | } |
10027 | |
10028 | at::Tensor input_gates_; |
10029 | if (at::functionalization::impl::isFunctionalTensor(input_gates)) { |
10030 | at::functionalization::impl::sync(input_gates); |
10031 | input_gates_ = at::functionalization::impl::from_functional_tensor(input_gates); |
10032 | } else { |
10033 | input_gates_ = input_gates; |
10034 | } |
10035 | |
10036 | at::Tensor hidden_gates_; |
10037 | if (at::functionalization::impl::isFunctionalTensor(hidden_gates)) { |
10038 | at::functionalization::impl::sync(hidden_gates); |
10039 | hidden_gates_ = at::functionalization::impl::from_functional_tensor(hidden_gates); |
10040 | } else { |
10041 | hidden_gates_ = hidden_gates; |
10042 | } |
10043 | |
10044 | at::Tensor hx_; |
10045 | if (at::functionalization::impl::isFunctionalTensor(hx)) { |
10046 | at::functionalization::impl::sync(hx); |
10047 | hx_ = at::functionalization::impl::from_functional_tensor(hx); |
10048 | } else { |
10049 | hx_ = hx; |
10050 | } |
10051 | |
10052 | c10::optional<at::Tensor> input_bias_; |
10053 | if (at::functionalization::impl::isFunctionalTensor(input_bias)) { |
10054 | at::functionalization::impl::sync(input_bias); |
10055 | input_bias_ = at::functionalization::impl::from_functional_tensor(input_bias); |
10056 | } else { |
10057 | input_bias_ = input_bias; |
10058 | } |
10059 | |
10060 | c10::optional<at::Tensor> hidden_bias_; |
10061 | if (at::functionalization::impl::isFunctionalTensor(hidden_bias)) { |
10062 | at::functionalization::impl::sync(hidden_bias); |
10063 | hidden_bias_ = at::functionalization::impl::from_functional_tensor(hidden_bias); |
10064 | } else { |
10065 | hidden_bias_ = hidden_bias; |
10066 | } |
10067 | |
10068 | at::Tensor out0_; |
10069 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
10070 | at::functionalization::impl::sync(out0); |
10071 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
10072 | } else { |
10073 | out0_ = out0; |
10074 | } |
10075 | |
10076 | at::Tensor out1_; |
10077 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
10078 | at::functionalization::impl::sync(out1); |
10079 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
10080 | } else { |
10081 | out1_ = out1; |
10082 | } |
10083 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) { |
10084 | if ((false || at::functionalization::impl::isFunctionalTensor(input_gates) || at::functionalization::impl::isFunctionalTensor(hidden_gates) || at::functionalization::impl::isFunctionalTensor(hx) || at::functionalization::impl::isFunctionalTensor(input_bias) || at::functionalization::impl::isFunctionalTensor(hidden_bias))) { |
10085 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10086 | TORCH_INTERNAL_ASSERT(false, |
10087 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10088 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10089 | } else { |
10090 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10091 | at::AutoDispatchSkipFunctionalize guard; |
10092 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_thnn_fused_gru_cell_out::call(input_gates_, hidden_gates_, hx_, input_bias_, hidden_bias_, out0_, out1_); |
10093 |         return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
10094 | } |
10095 | } else { |
10096 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
10097 | { |
10098 | at::AutoDispatchSkipFunctionalize guard; |
10099 | tmp_output = at::_ops::_thnn_fused_gru_cell::call(input_gates_, hidden_gates_, hx_, input_bias_, hidden_bias_); |
10100 | } |
10101 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
10102 | at::functionalization::impl::commit_update(out0); |
10103 | at::functionalization::impl::sync(out0); |
10104 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
10105 | at::functionalization::impl::commit_update(out1); |
10106 | at::functionalization::impl::sync(out1); |
10107 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
10108 | } |
10109 | } |
10110 | |
10111 | ::std::tuple<at::Tensor &,at::Tensor &> _pack_padded_sequence_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & lengths, bool batch_first, at::Tensor & out0, at::Tensor & out1) { |
10112 | if (false) { |
10113 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10114 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10115 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10116 | auto input_meta = to_meta(input); |
10117 | auto lengths_meta = to_meta(lengths); |
10118 | auto out0_meta = to_meta(out0); |
10119 | auto out1_meta = to_meta(out1); |
10120 | at::AutoDispatchSkipFunctionalize func_guard; |
10121 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10122 | at::_ops::_pack_padded_sequence_out::call(input_meta, lengths_meta, batch_first, out0_meta, out1_meta); |
10123 | } |
10124 | |
10125 | at::Tensor input_; |
10126 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
10127 | at::functionalization::impl::sync(input); |
10128 | input_ = at::functionalization::impl::from_functional_tensor(input); |
10129 | } else { |
10130 | input_ = input; |
10131 | } |
10132 | |
10133 | at::Tensor lengths_; |
10134 | if (at::functionalization::impl::isFunctionalTensor(lengths)) { |
10135 | at::functionalization::impl::sync(lengths); |
10136 | lengths_ = at::functionalization::impl::from_functional_tensor(lengths); |
10137 | } else { |
10138 | lengths_ = lengths; |
10139 | } |
10140 | |
10141 | at::Tensor out0_; |
10142 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
10143 | at::functionalization::impl::sync(out0); |
10144 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
10145 | } else { |
10146 | out0_ = out0; |
10147 | } |
10148 | |
10149 | at::Tensor out1_; |
10150 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
10151 | at::functionalization::impl::sync(out1); |
10152 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
10153 | } else { |
10154 | out1_ = out1; |
10155 | } |
10156 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) { |
10157 | if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(lengths))) { |
10158 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10159 | TORCH_INTERNAL_ASSERT(false, |
10160 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10161 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10162 | } else { |
10163 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10164 | at::AutoDispatchSkipFunctionalize guard; |
10165 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_pack_padded_sequence_out::call(input_, lengths_, batch_first, out0_, out1_); |
10166 |         return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
10167 | } |
10168 | } else { |
10169 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
10170 | { |
10171 | at::AutoDispatchSkipFunctionalize guard; |
10172 | tmp_output = at::_ops::_pack_padded_sequence::call(input_, lengths_, batch_first); |
10173 | } |
10174 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
10175 | at::functionalization::impl::commit_update(out0); |
10176 | at::functionalization::impl::sync(out0); |
10177 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
10178 | at::functionalization::impl::commit_update(out1); |
10179 | at::functionalization::impl::sync(out1); |
10180 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
10181 | } |
10182 | } |
10183 | |
10184 | at::Tensor & scatter_reduce_out_two_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self, at::Tensor & out) { |
10185 | if (false) { |
10186 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10187 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10188 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10189 | auto self_meta = to_meta(self); |
10190 | auto index_meta = to_meta(index); |
10191 | auto src_meta = to_meta(src); |
10192 | auto out_meta = to_meta(out); |
10193 | at::AutoDispatchSkipFunctionalize func_guard; |
10194 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10195 | at::_ops::scatter_reduce_two_out::call(self_meta, dim, index_meta, src_meta, reduce, include_self, out_meta); |
10196 | } |
10197 | |
10198 | at::Tensor self_; |
10199 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10200 | at::functionalization::impl::sync(self); |
10201 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10202 | } else { |
10203 | self_ = self; |
10204 | } |
10205 | |
10206 | at::Tensor index_; |
10207 | if (at::functionalization::impl::isFunctionalTensor(index)) { |
10208 | at::functionalization::impl::sync(index); |
10209 | index_ = at::functionalization::impl::from_functional_tensor(index); |
10210 | } else { |
10211 | index_ = index; |
10212 | } |
10213 | |
10214 | at::Tensor src_; |
10215 | if (at::functionalization::impl::isFunctionalTensor(src)) { |
10216 | at::functionalization::impl::sync(src); |
10217 | src_ = at::functionalization::impl::from_functional_tensor(src); |
10218 | } else { |
10219 | src_ = src; |
10220 | } |
10221 | |
10222 | at::Tensor out_; |
10223 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
10224 | at::functionalization::impl::sync(out); |
10225 | out_ = at::functionalization::impl::from_functional_tensor(out); |
10226 | } else { |
10227 | out_ = out; |
10228 | } |
10229 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
10230 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(src))) { |
10231 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10232 | TORCH_INTERNAL_ASSERT(false, |
10233 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10234 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10235 | } else { |
10236 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10237 | at::AutoDispatchSkipFunctionalize guard; |
10238 | at::Tensor tmp_output = at::_ops::scatter_reduce_two_out::call(self_, dim, index_, src_, reduce, include_self, out_); |
10239 |         return out; |
10240 | } |
10241 | } else { |
10242 | at::Tensor tmp_output; |
10243 | { |
10244 | at::AutoDispatchSkipFunctionalize guard; |
10245 | tmp_output = at::_ops::scatter_reduce_two::call(self_, dim, index_, src_, reduce, include_self); |
10246 | } |
10247 | at::functionalization::impl::replace_(out, tmp_output); |
10248 | at::functionalization::impl::commit_update(out); |
10249 | at::functionalization::impl::sync(out); |
10250 | return out; |
10251 | } |
10252 | } |
10253 | |
10254 | at::Tensor & scatter_reduce__two(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) { |
10255 | if (true) { |
10256 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10257 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10258 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10259 | auto self_meta = to_meta(self); |
10260 | auto index_meta = to_meta(index); |
10261 | auto src_meta = to_meta(src); |
10262 | at::AutoDispatchSkipFunctionalize func_guard; |
10263 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10264 | at::_ops::scatter_reduce__two::call(self_meta, dim, index_meta, src_meta, reduce, include_self); |
10265 | } |
10266 | |
10267 | at::Tensor self_; |
10268 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10269 | at::functionalization::impl::sync(self); |
10270 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10271 | } else { |
10272 | self_ = self; |
10273 | } |
10274 | |
10275 | at::Tensor index_; |
10276 | if (at::functionalization::impl::isFunctionalTensor(index)) { |
10277 | at::functionalization::impl::sync(index); |
10278 | index_ = at::functionalization::impl::from_functional_tensor(index); |
10279 | } else { |
10280 | index_ = index; |
10281 | } |
10282 | |
10283 | at::Tensor src_; |
10284 | if (at::functionalization::impl::isFunctionalTensor(src)) { |
10285 | at::functionalization::impl::sync(src); |
10286 | src_ = at::functionalization::impl::from_functional_tensor(src); |
10287 | } else { |
10288 | src_ = src; |
10289 | } |
10290 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
10291 | if ((false || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(src))) { |
10292 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10293 | TORCH_INTERNAL_ASSERT(false, |
10294 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10295 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10296 | } else { |
10297 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10298 | at::AutoDispatchSkipFunctionalize guard; |
10299 | at::Tensor tmp_output = at::_ops::scatter_reduce__two::call(self_, dim, index_, src_, reduce, include_self); |
10300 |         return self; |
10301 | } |
10302 | } else { |
10303 | at::Tensor tmp_output; |
10304 | { |
10305 | at::AutoDispatchSkipFunctionalize guard; |
10306 | tmp_output = at::_ops::scatter_reduce_two::call(self_, dim, index_, src_, reduce, include_self); |
10307 | } |
10308 | at::functionalization::impl::replace_(self, tmp_output); |
10309 | at::functionalization::impl::commit_update(self); |
10310 | at::functionalization::impl::sync(self); |
10311 | return self; |
10312 | } |
10313 | } |
10314 | |
10315 | at::Tensor & bitwise_xor_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
10316 | if (false) { |
10317 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10318 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10319 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10320 | auto self_meta = to_meta(self); |
10321 | auto other_meta = to_meta(other); |
10322 | auto out_meta = to_meta(out); |
10323 | at::AutoDispatchSkipFunctionalize func_guard; |
10324 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10325 | at::_ops::bitwise_xor_Tensor_out::call(self_meta, other_meta, out_meta); |
10326 | } |
10327 | |
10328 | at::Tensor self_; |
10329 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10330 | at::functionalization::impl::sync(self); |
10331 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10332 | } else { |
10333 | self_ = self; |
10334 | } |
10335 | |
10336 | at::Tensor other_; |
10337 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
10338 | at::functionalization::impl::sync(other); |
10339 | other_ = at::functionalization::impl::from_functional_tensor(other); |
10340 | } else { |
10341 | other_ = other; |
10342 | } |
10343 | |
10344 | at::Tensor out_; |
10345 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
10346 | at::functionalization::impl::sync(out); |
10347 | out_ = at::functionalization::impl::from_functional_tensor(out); |
10348 | } else { |
10349 | out_ = out; |
10350 | } |
10351 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
10352 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
10353 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10354 | TORCH_INTERNAL_ASSERT(false, |
10355 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10356 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10357 | } else { |
10358 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10359 | at::AutoDispatchSkipFunctionalize guard; |
10360 | at::Tensor tmp_output = at::_ops::bitwise_xor_Tensor_out::call(self_, other_, out_); |
10361 |         return out; |
10362 | } |
10363 | } else { |
10364 | at::Tensor tmp_output; |
10365 | { |
10366 | at::AutoDispatchSkipFunctionalize guard; |
10367 | tmp_output = at::_ops::bitwise_xor_Tensor::call(self_, other_); |
10368 | } |
10369 | at::functionalization::impl::replace_(out, tmp_output); |
10370 | at::functionalization::impl::commit_update(out); |
10371 | at::functionalization::impl::sync(out); |
10372 | return out; |
10373 | } |
10374 | } |
10375 | |
10376 | at::Tensor & bitwise_xor__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { |
10377 | if (true) { |
10378 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10379 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10380 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10381 | auto self_meta = to_meta(self); |
10382 | auto other_meta = to_meta(other); |
10383 | at::AutoDispatchSkipFunctionalize func_guard; |
10384 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10385 | at::_ops::bitwise_xor__Tensor::call(self_meta, other_meta); |
10386 | } |
10387 | |
10388 | at::Tensor self_; |
10389 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10390 | at::functionalization::impl::sync(self); |
10391 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10392 | } else { |
10393 | self_ = self; |
10394 | } |
10395 | |
10396 | at::Tensor other_; |
10397 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
10398 | at::functionalization::impl::sync(other); |
10399 | other_ = at::functionalization::impl::from_functional_tensor(other); |
10400 | } else { |
10401 | other_ = other; |
10402 | } |
10403 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
10404 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
10405 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10406 | TORCH_INTERNAL_ASSERT(false, |
10407 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10408 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10409 | } else { |
10410 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10411 | at::AutoDispatchSkipFunctionalize guard; |
10412 | at::Tensor tmp_output = at::_ops::bitwise_xor__Tensor::call(self_, other_); |
10413 |         return self; |
10414 | } |
10415 | } else { |
10416 | at::Tensor tmp_output; |
10417 | { |
10418 | at::AutoDispatchSkipFunctionalize guard; |
10419 | tmp_output = at::_ops::bitwise_xor_Tensor::call(self_, other_); |
10420 | } |
10421 | at::functionalization::impl::replace_(self, tmp_output); |
10422 | at::functionalization::impl::commit_update(self); |
10423 | at::functionalization::impl::sync(self); |
10424 | return self; |
10425 | } |
10426 | } |
10427 | |
10428 | at::Tensor & bitwise_xor_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
10429 | if (false) { |
10430 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10431 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10432 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10433 | auto self_meta = to_meta(self); |
10434 | auto out_meta = to_meta(out); |
10435 | at::AutoDispatchSkipFunctionalize func_guard; |
10436 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10437 | at::_ops::bitwise_xor_Scalar_out::call(self_meta, other, out_meta); |
10438 | } |
10439 | |
10440 | at::Tensor self_; |
10441 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10442 | at::functionalization::impl::sync(self); |
10443 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10444 | } else { |
10445 | self_ = self; |
10446 | } |
10447 | |
10448 | at::Tensor out_; |
10449 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
10450 | at::functionalization::impl::sync(out); |
10451 | out_ = at::functionalization::impl::from_functional_tensor(out); |
10452 | } else { |
10453 | out_ = out; |
10454 | } |
10455 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
10456 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
10457 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10458 | TORCH_INTERNAL_ASSERT(false, |
10459 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10460 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10461 | } else { |
10462 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10463 | at::AutoDispatchSkipFunctionalize guard; |
10464 | at::Tensor tmp_output = at::_ops::bitwise_xor_Scalar_out::call(self_, other, out_); |
10465 |         return out; |
10466 | } |
10467 | } else { |
10468 | at::Tensor tmp_output; |
10469 | { |
10470 | at::AutoDispatchSkipFunctionalize guard; |
10471 | tmp_output = at::_ops::bitwise_xor_Scalar::call(self_, other); |
10472 | } |
10473 | at::functionalization::impl::replace_(out, tmp_output); |
10474 | at::functionalization::impl::commit_update(out); |
10475 | at::functionalization::impl::sync(out); |
10476 | return out; |
10477 | } |
10478 | } |
10479 | |
10480 | at::Tensor & bitwise_xor__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { |
10481 | if (true) { |
10482 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10483 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10484 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10485 | auto self_meta = to_meta(self); |
10486 | at::AutoDispatchSkipFunctionalize func_guard; |
10487 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10488 | at::_ops::bitwise_xor__Scalar::call(self_meta, other); |
10489 | } |
10490 | |
10491 | at::Tensor self_; |
10492 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10493 | at::functionalization::impl::sync(self); |
10494 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10495 | } else { |
10496 | self_ = self; |
10497 | } |
10498 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
10499 | if ((false)) { |
10500 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10501 | TORCH_INTERNAL_ASSERT(false, |
10502 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10503 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10504 | } else { |
10505 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10506 | at::AutoDispatchSkipFunctionalize guard; |
10507 | at::Tensor tmp_output = at::_ops::bitwise_xor__Scalar::call(self_, other); |
10508 |         return self; |
10509 | } |
10510 | } else { |
10511 | at::Tensor tmp_output; |
10512 | { |
10513 | at::AutoDispatchSkipFunctionalize guard; |
10514 | tmp_output = at::_ops::bitwise_xor_Scalar::call(self_, other); |
10515 | } |
10516 | at::functionalization::impl::replace_(self, tmp_output); |
10517 | at::functionalization::impl::commit_update(self); |
10518 | at::functionalization::impl::sync(self); |
10519 | return self; |
10520 | } |
10521 | } |
10522 | |
10523 | at::Tensor & bitwise_xor_out_Scalar_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) { |
10524 | if (false) { |
10525 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10526 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10527 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10528 | auto other_meta = to_meta(other); |
10529 | auto out_meta = to_meta(out); |
10530 | at::AutoDispatchSkipFunctionalize func_guard; |
10531 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10532 | at::_ops::bitwise_xor_Scalar_Tensor_out::call(self, other_meta, out_meta); |
10533 | } |
10534 | |
10535 | at::Tensor other_; |
10536 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
10537 | at::functionalization::impl::sync(other); |
10538 | other_ = at::functionalization::impl::from_functional_tensor(other); |
10539 | } else { |
10540 | other_ = other; |
10541 | } |
10542 | |
10543 | at::Tensor out_; |
10544 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
10545 | at::functionalization::impl::sync(out); |
10546 | out_ = at::functionalization::impl::from_functional_tensor(out); |
10547 | } else { |
10548 | out_ = out; |
10549 | } |
10550 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
10551 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
10552 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10553 | TORCH_INTERNAL_ASSERT(false, |
10554 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10555 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10556 | } else { |
10557 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10558 | at::AutoDispatchSkipFunctionalize guard; |
10559 | at::Tensor tmp_output = at::_ops::bitwise_xor_Scalar_Tensor_out::call(self, other_, out_); |
10560 |         return out; |
10561 | } |
10562 | } else { |
10563 | at::Tensor tmp_output; |
10564 | { |
10565 | at::AutoDispatchSkipFunctionalize guard; |
10566 | tmp_output = at::_ops::bitwise_xor_Scalar_Tensor::call(self, other_); |
10567 | } |
10568 | at::functionalization::impl::replace_(out, tmp_output); |
10569 | at::functionalization::impl::commit_update(out); |
10570 | at::functionalization::impl::sync(out); |
10571 | return out; |
10572 | } |
10573 | } |
10574 | |
10575 | at::Tensor & addbmm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { |
10576 | if (false) { |
10577 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10578 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10579 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10580 | auto self_meta = to_meta(self); |
10581 | auto batch1_meta = to_meta(batch1); |
10582 | auto batch2_meta = to_meta(batch2); |
10583 | auto out_meta = to_meta(out); |
10584 | at::AutoDispatchSkipFunctionalize func_guard; |
10585 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10586 | at::_ops::addbmm_out::call(self_meta, batch1_meta, batch2_meta, beta, alpha, out_meta); |
10587 | } |
10588 | |
10589 | at::Tensor self_; |
10590 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10591 | at::functionalization::impl::sync(self); |
10592 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10593 | } else { |
10594 | self_ = self; |
10595 | } |
10596 | |
10597 | at::Tensor batch1_; |
10598 | if (at::functionalization::impl::isFunctionalTensor(batch1)) { |
10599 | at::functionalization::impl::sync(batch1); |
10600 | batch1_ = at::functionalization::impl::from_functional_tensor(batch1); |
10601 | } else { |
10602 | batch1_ = batch1; |
10603 | } |
10604 | |
10605 | at::Tensor batch2_; |
10606 | if (at::functionalization::impl::isFunctionalTensor(batch2)) { |
10607 | at::functionalization::impl::sync(batch2); |
10608 | batch2_ = at::functionalization::impl::from_functional_tensor(batch2); |
10609 | } else { |
10610 | batch2_ = batch2; |
10611 | } |
10612 | |
10613 | at::Tensor out_; |
10614 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
10615 | at::functionalization::impl::sync(out); |
10616 | out_ = at::functionalization::impl::from_functional_tensor(out); |
10617 | } else { |
10618 | out_ = out; |
10619 | } |
10620 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
10621 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(batch1) || at::functionalization::impl::isFunctionalTensor(batch2))) { |
10622 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10623 | TORCH_INTERNAL_ASSERT(false, |
10624 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10625 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10626 | } else { |
10627 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10628 | at::AutoDispatchSkipFunctionalize guard; |
10629 | at::Tensor tmp_output = at::_ops::addbmm_out::call(self_, batch1_, batch2_, beta, alpha, out_); |
10630 |         return out; |
10631 | } |
10632 | } else { |
10633 | at::Tensor tmp_output; |
10634 | { |
10635 | at::AutoDispatchSkipFunctionalize guard; |
10636 | tmp_output = at::_ops::addbmm::call(self_, batch1_, batch2_, beta, alpha); |
10637 | } |
10638 | at::functionalization::impl::replace_(out, tmp_output); |
10639 | at::functionalization::impl::commit_update(out); |
10640 | at::functionalization::impl::sync(out); |
10641 | return out; |
10642 | } |
10643 | } |
10644 | |
10645 | at::Tensor & addbmm_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) { |
10646 | if (true) { |
10647 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10648 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10649 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10650 | auto self_meta = to_meta(self); |
10651 | auto batch1_meta = to_meta(batch1); |
10652 | auto batch2_meta = to_meta(batch2); |
10653 | at::AutoDispatchSkipFunctionalize func_guard; |
10654 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10655 | at::_ops::addbmm_::call(self_meta, batch1_meta, batch2_meta, beta, alpha); |
10656 | } |
10657 | |
10658 | at::Tensor self_; |
10659 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10660 | at::functionalization::impl::sync(self); |
10661 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10662 | } else { |
10663 | self_ = self; |
10664 | } |
10665 | |
10666 | at::Tensor batch1_; |
10667 | if (at::functionalization::impl::isFunctionalTensor(batch1)) { |
10668 | at::functionalization::impl::sync(batch1); |
10669 | batch1_ = at::functionalization::impl::from_functional_tensor(batch1); |
10670 | } else { |
10671 | batch1_ = batch1; |
10672 | } |
10673 | |
10674 | at::Tensor batch2_; |
10675 | if (at::functionalization::impl::isFunctionalTensor(batch2)) { |
10676 | at::functionalization::impl::sync(batch2); |
10677 | batch2_ = at::functionalization::impl::from_functional_tensor(batch2); |
10678 | } else { |
10679 | batch2_ = batch2; |
10680 | } |
10681 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
10682 | if ((false || at::functionalization::impl::isFunctionalTensor(batch1) || at::functionalization::impl::isFunctionalTensor(batch2))) { |
10683 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10684 | TORCH_INTERNAL_ASSERT(false, |
10685 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10686 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10687 | } else { |
10688 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10689 | at::AutoDispatchSkipFunctionalize guard; |
10690 | at::Tensor tmp_output = at::_ops::addbmm_::call(self_, batch1_, batch2_, beta, alpha); |
10691 |         return self; |
10692 | } |
10693 | } else { |
10694 | at::Tensor tmp_output; |
10695 | { |
10696 | at::AutoDispatchSkipFunctionalize guard; |
10697 | tmp_output = at::_ops::addbmm::call(self_, batch1_, batch2_, beta, alpha); |
10698 | } |
10699 | at::functionalization::impl::replace_(self, tmp_output); |
10700 | at::functionalization::impl::commit_update(self); |
10701 | at::functionalization::impl::sync(self); |
10702 | return self; |
10703 | } |
10704 | } |
10705 | |
10706 | at::Tensor & random_out_from_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator, at::Tensor & out) { |
10707 | if (false) { |
10708 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10709 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10710 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10711 | auto self_meta = to_meta(self); |
10712 | auto out_meta = to_meta(out); |
10713 | at::AutoDispatchSkipFunctionalize func_guard; |
10714 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10715 | at::_ops::random_from_out::call(self_meta, from, to, generator, out_meta); |
10716 | } |
10717 | |
10718 | at::Tensor self_; |
10719 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10720 | at::functionalization::impl::sync(self); |
10721 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10722 | } else { |
10723 | self_ = self; |
10724 | } |
10725 | |
10726 | at::Tensor out_; |
10727 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
10728 | at::functionalization::impl::sync(out); |
10729 | out_ = at::functionalization::impl::from_functional_tensor(out); |
10730 | } else { |
10731 | out_ = out; |
10732 | } |
10733 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
10734 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
10735 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10736 | TORCH_INTERNAL_ASSERT(false, |
10737 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10738 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10739 | } else { |
10740 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10741 | at::AutoDispatchSkipFunctionalize guard; |
10742 | at::Tensor tmp_output = at::_ops::random_from_out::call(self_, from, to, generator, out_); |
10743 |         return out; |
10744 | } |
10745 | } else { |
10746 | at::Tensor tmp_output; |
10747 | { |
10748 | at::AutoDispatchSkipFunctionalize guard; |
10749 | tmp_output = at::_ops::random_from::call(self_, from, to, generator); |
10750 | } |
10751 | at::functionalization::impl::replace_(out, tmp_output); |
10752 | at::functionalization::impl::commit_update(out); |
10753 | at::functionalization::impl::sync(out); |
10754 | return out; |
10755 | } |
10756 | } |
10757 | |
10758 | at::Tensor & random__from(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator) { |
10759 | if (true) { |
10760 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10761 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10762 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10763 | auto self_meta = to_meta(self); |
10764 | at::AutoDispatchSkipFunctionalize func_guard; |
10765 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10766 | at::_ops::random__from::call(self_meta, from, to, generator); |
10767 | } |
10768 | |
10769 | at::Tensor self_; |
10770 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10771 | at::functionalization::impl::sync(self); |
10772 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10773 | } else { |
10774 | self_ = self; |
10775 | } |
10776 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
10777 | if ((false)) { |
10778 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10779 | TORCH_INTERNAL_ASSERT(false, |
10780 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10781 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10782 | } else { |
10783 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10784 | at::AutoDispatchSkipFunctionalize guard; |
10785 | at::Tensor tmp_output = at::_ops::random__from::call(self_, from, to, generator); |
10786 |         return self; |
10787 | } |
10788 | } else { |
10789 | at::Tensor tmp_output; |
10790 | { |
10791 | at::AutoDispatchSkipFunctionalize guard; |
10792 | tmp_output = at::_ops::random_from::call(self_, from, to, generator); |
10793 | } |
10794 | at::functionalization::impl::replace_(self, tmp_output); |
10795 | at::functionalization::impl::commit_update(self); |
10796 | at::functionalization::impl::sync(self); |
10797 | return self; |
10798 | } |
10799 | } |
10800 | |
10801 | at::Tensor & random_out_to_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t to, c10::optional<at::Generator> generator, at::Tensor & out) { |
10802 | if (false) { |
10803 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10804 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10805 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10806 | auto self_meta = to_meta(self); |
10807 | auto out_meta = to_meta(out); |
10808 | at::AutoDispatchSkipFunctionalize func_guard; |
10809 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10810 | at::_ops::random_to_out::call(self_meta, to, generator, out_meta); |
10811 | } |
10812 | |
10813 | at::Tensor self_; |
10814 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10815 | at::functionalization::impl::sync(self); |
10816 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10817 | } else { |
10818 | self_ = self; |
10819 | } |
10820 | |
10821 | at::Tensor out_; |
10822 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
10823 | at::functionalization::impl::sync(out); |
10824 | out_ = at::functionalization::impl::from_functional_tensor(out); |
10825 | } else { |
10826 | out_ = out; |
10827 | } |
10828 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
10829 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
10830 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10831 | TORCH_INTERNAL_ASSERT(false, |
10832 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10833 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10834 | } else { |
10835 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10836 | at::AutoDispatchSkipFunctionalize guard; |
10837 | at::Tensor tmp_output = at::_ops::random_to_out::call(self_, to, generator, out_); |
10838 |         return out; |
10839 | } |
10840 | } else { |
10841 | at::Tensor tmp_output; |
10842 | { |
10843 | at::AutoDispatchSkipFunctionalize guard; |
10844 | tmp_output = at::_ops::random_to::call(self_, to, generator); |
10845 | } |
10846 | at::functionalization::impl::replace_(out, tmp_output); |
10847 | at::functionalization::impl::commit_update(out); |
10848 | at::functionalization::impl::sync(out); |
10849 | return out; |
10850 | } |
10851 | } |
10852 | |
10853 | at::Tensor & random__to(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t to, c10::optional<at::Generator> generator) { |
10854 | if (true) { |
10855 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10856 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10857 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10858 | auto self_meta = to_meta(self); |
10859 | at::AutoDispatchSkipFunctionalize func_guard; |
10860 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10861 | at::_ops::random__to::call(self_meta, to, generator); |
10862 | } |
10863 | |
10864 | at::Tensor self_; |
10865 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10866 | at::functionalization::impl::sync(self); |
10867 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10868 | } else { |
10869 | self_ = self; |
10870 | } |
10871 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
10872 | if ((false)) { |
10873 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10874 | TORCH_INTERNAL_ASSERT(false, |
10875 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10876 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10877 | } else { |
10878 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10879 | at::AutoDispatchSkipFunctionalize guard; |
10880 | at::Tensor tmp_output = at::_ops::random__to::call(self_, to, generator); |
10881 |         return self; |
10882 | } |
10883 | } else { |
10884 | at::Tensor tmp_output; |
10885 | { |
10886 | at::AutoDispatchSkipFunctionalize guard; |
10887 | tmp_output = at::_ops::random_to::call(self_, to, generator); |
10888 | } |
10889 | at::functionalization::impl::replace_(self, tmp_output); |
10890 | at::functionalization::impl::commit_update(self); |
10891 | at::functionalization::impl::sync(self); |
10892 | return self; |
10893 | } |
10894 | } |
10895 | |
10896 | at::Tensor & random_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) { |
10897 | if (false) { |
10898 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10899 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10900 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10901 | auto self_meta = to_meta(self); |
10902 | auto out_meta = to_meta(out); |
10903 | at::AutoDispatchSkipFunctionalize func_guard; |
10904 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10905 | at::_ops::random_out::call(self_meta, generator, out_meta); |
10906 | } |
10907 | |
10908 | at::Tensor self_; |
10909 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10910 | at::functionalization::impl::sync(self); |
10911 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10912 | } else { |
10913 | self_ = self; |
10914 | } |
10915 | |
10916 | at::Tensor out_; |
10917 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
10918 | at::functionalization::impl::sync(out); |
10919 | out_ = at::functionalization::impl::from_functional_tensor(out); |
10920 | } else { |
10921 | out_ = out; |
10922 | } |
10923 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
10924 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
10925 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10926 | TORCH_INTERNAL_ASSERT(false, |
10927 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10928 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10929 | } else { |
10930 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10931 | at::AutoDispatchSkipFunctionalize guard; |
10932 | at::Tensor tmp_output = at::_ops::random_out::call(self_, generator, out_); |
10933 |         return out; |
10934 | } |
10935 | } else { |
10936 | at::Tensor tmp_output; |
10937 | { |
10938 | at::AutoDispatchSkipFunctionalize guard; |
10939 | tmp_output = at::_ops::random::call(self_, generator); |
10940 | } |
10941 | at::functionalization::impl::replace_(out, tmp_output); |
10942 | at::functionalization::impl::commit_update(out); |
10943 | at::functionalization::impl::sync(out); |
10944 | return out; |
10945 | } |
10946 | } |
10947 | |
10948 | at::Tensor & random_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, c10::optional<at::Generator> generator) { |
10949 | if (true) { |
10950 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10951 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10952 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10953 | auto self_meta = to_meta(self); |
10954 | at::AutoDispatchSkipFunctionalize func_guard; |
10955 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10956 | at::_ops::random_::call(self_meta, generator); |
10957 | } |
10958 | |
10959 | at::Tensor self_; |
10960 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10961 | at::functionalization::impl::sync(self); |
10962 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10963 | } else { |
10964 | self_ = self; |
10965 | } |
10966 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
10967 | if ((false)) { |
10968 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
10969 | TORCH_INTERNAL_ASSERT(false, |
10970 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10971 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10972 | } else { |
10973 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10974 | at::AutoDispatchSkipFunctionalize guard; |
10975 | at::Tensor tmp_output = at::_ops::random_::call(self_, generator); |
10976 |         return self; |
10977 | } |
10978 | } else { |
10979 | at::Tensor tmp_output; |
10980 | { |
10981 | at::AutoDispatchSkipFunctionalize guard; |
10982 | tmp_output = at::_ops::random::call(self_, generator); |
10983 | } |
10984 | at::functionalization::impl::replace_(self, tmp_output); |
10985 | at::functionalization::impl::commit_update(self); |
10986 | at::functionalization::impl::sync(self); |
10987 | return self; |
10988 | } |
10989 | } |
10990 | |
10991 | at::Tensor & exponential_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double lambd, c10::optional<at::Generator> generator, at::Tensor & out) { |
10992 | if (false) { |
10993 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10994 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10995 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
10996 | auto self_meta = to_meta(self); |
10997 | auto out_meta = to_meta(out); |
10998 | at::AutoDispatchSkipFunctionalize func_guard; |
10999 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11000 | at::_ops::exponential_out::call(self_meta, lambd, generator, out_meta); |
11001 | } |
11002 | |
11003 | at::Tensor self_; |
11004 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11005 | at::functionalization::impl::sync(self); |
11006 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11007 | } else { |
11008 | self_ = self; |
11009 | } |
11010 | |
11011 | at::Tensor out_; |
11012 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
11013 | at::functionalization::impl::sync(out); |
11014 | out_ = at::functionalization::impl::from_functional_tensor(out); |
11015 | } else { |
11016 | out_ = out; |
11017 | } |
11018 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
11019 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
11020 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
11021 | TORCH_INTERNAL_ASSERT(false, |
11022 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11023 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11024 | } else { |
11025 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11026 | at::AutoDispatchSkipFunctionalize guard; |
11027 | at::Tensor tmp_output = at::_ops::exponential_out::call(self_, lambd, generator, out_); |
11028 |         return out; |
11029 | } |
11030 | } else { |
11031 | at::Tensor tmp_output; |
11032 | { |
11033 | at::AutoDispatchSkipFunctionalize guard; |
11034 | tmp_output = at::_ops::exponential::call(self_, lambd, generator); |
11035 | } |
11036 | at::functionalization::impl::replace_(out, tmp_output); |
11037 | at::functionalization::impl::commit_update(out); |
11038 | at::functionalization::impl::sync(out); |
11039 | return out; |
11040 | } |
11041 | } |
11042 | |
11043 | at::Tensor & exponential_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double lambd, c10::optional<at::Generator> generator) { |
11044 | if (true) { |
11045 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11046 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
11047 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
11048 | auto self_meta = to_meta(self); |
11049 | at::AutoDispatchSkipFunctionalize func_guard; |
11050 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11051 | at::_ops::exponential_::call(self_meta, lambd, generator); |
11052 | } |
11053 | |
11054 | at::Tensor self_; |
11055 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11056 | at::functionalization::impl::sync(self); |
11057 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11058 | } else { |
11059 | self_ = self; |
11060 | } |
11061 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
11062 | if ((false)) { |
11063 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
11064 | TORCH_INTERNAL_ASSERT(false, |
11065 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11066 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11067 | } else { |
11068 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11069 | at::AutoDispatchSkipFunctionalize guard; |
11070 | at::Tensor tmp_output = at::_ops::exponential_::call(self_, lambd, generator); |
11071 |         return self; |
11072 | } |
11073 | } else { |
11074 | at::Tensor tmp_output; |
11075 | { |
11076 | at::AutoDispatchSkipFunctionalize guard; |
11077 | tmp_output = at::_ops::exponential::call(self_, lambd, generator); |
11078 | } |
11079 | at::functionalization::impl::replace_(self, tmp_output); |
11080 | at::functionalization::impl::commit_update(self); |
11081 | at::functionalization::impl::sync(self); |
11082 | return self; |
11083 | } |
11084 | } |
11085 | |
11086 | at::Tensor & geometric_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, c10::optional<at::Generator> generator, at::Tensor & out) { |
11087 | if (false) { |
11088 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11089 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
11090 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
11091 | auto self_meta = to_meta(self); |
11092 | auto out_meta = to_meta(out); |
11093 | at::AutoDispatchSkipFunctionalize func_guard; |
11094 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11095 | at::_ops::geometric_out::call(self_meta, p, generator, out_meta); |
11096 | } |
11097 | |
11098 | at::Tensor self_; |
11099 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11100 | at::functionalization::impl::sync(self); |
11101 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11102 | } else { |
11103 | self_ = self; |
11104 | } |
11105 | |
11106 | at::Tensor out_; |
11107 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
11108 | at::functionalization::impl::sync(out); |
11109 | out_ = at::functionalization::impl::from_functional_tensor(out); |
11110 | } else { |
11111 | out_ = out; |
11112 | } |
11113 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
11114 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
11115 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
11116 | TORCH_INTERNAL_ASSERT(false, |
11117 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11118 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11119 | } else { |
11120 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11121 | at::AutoDispatchSkipFunctionalize guard; |
11122 | at::Tensor tmp_output = at::_ops::geometric_out::call(self_, p, generator, out_); |
11123 |         return out; |
11124 | } |
11125 | } else { |
11126 | at::Tensor tmp_output; |
11127 | { |
11128 | at::AutoDispatchSkipFunctionalize guard; |
11129 | tmp_output = at::_ops::geometric::call(self_, p, generator); |
11130 | } |
11131 | at::functionalization::impl::replace_(out, tmp_output); |
11132 | at::functionalization::impl::commit_update(out); |
11133 | at::functionalization::impl::sync(out); |
11134 | return out; |
11135 | } |
11136 | } |
11137 | |
11138 | at::Tensor & geometric_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p, c10::optional<at::Generator> generator) { |
11139 | if (true) { |
11140 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11141 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
11142 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11143 | auto self_meta = to_meta(self); |
11144 | at::AutoDispatchSkipFunctionalize func_guard; |
11145 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11146 | at::_ops::geometric_::call(self_meta, p, generator); |
11147 | } |
11148 | |
11149 | at::Tensor self_; |
11150 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11151 | at::functionalization::impl::sync(self); |
11152 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11153 | } else { |
11154 | self_ = self; |
11155 | } |
11156 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
11157 | if ((false)) { |
11158 |           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11159 | TORCH_INTERNAL_ASSERT(false, |
11160 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11161 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11162 | } else { |
11163 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11164 | at::AutoDispatchSkipFunctionalize guard; |
11165 | at::Tensor tmp_output = at::_ops::geometric_::call(self_, p, generator); |
11166 |           return self;
11167 | } |
11168 | } else { |
11169 | at::Tensor tmp_output; |
11170 | { |
11171 | at::AutoDispatchSkipFunctionalize guard; |
11172 | tmp_output = at::_ops::geometric::call(self_, p, generator); |
11173 | } |
11174 | at::functionalization::impl::replace_(self, tmp_output); |
11175 | at::functionalization::impl::commit_update(self); |
11176 | at::functionalization::impl::sync(self); |
11177 | return self; |
11178 | } |
11179 | } |
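
    // The sketch below is an editorial illustration of the in-place pattern used by geometric_,
    // igammac_, remainder__Scalar and the other mutable wrappers in this section. It is written for
    // a hypothetical op "my_op_" / "my_op" (neither exists in ATen) and is kept inside #if 0 so it
    // has no effect on this translation unit; it only restates, in condensed form, the control flow
    // of the expanded kernels around it.
#if 0
    at::Tensor & my_op__sketch(at::Tensor & self) {
      // 1) Unwrap: flush pending updates, then pull the plain tensor out of the functional wrapper.
      at::Tensor self_ = self;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // 2) No functional tensors involved: redispatch the original mutable op below Functionalize.
        at::AutoDispatchSkipFunctionalize guard;
        at::_ops::my_op_::call(self_);
        return self;
      }
      // 3) Functional path: run the *functional* variant on the unwrapped tensor, then commit the
      //    result back into the wrapper so later reads of `self` observe the mutation.
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        tmp_output = at::_ops::my_op::call(self_);
      }
      at::functionalization::impl::replace_(self, tmp_output);
      at::functionalization::impl::commit_update(self);
      at::functionalization::impl::sync(self);
      return self;
    }
#endif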
11180 | |
11181 | at::Tensor & cross_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim, at::Tensor & out) { |
11182 | if (false) { |
11183 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11184 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
11185 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11186 | auto self_meta = to_meta(self); |
11187 | auto other_meta = to_meta(other); |
11188 | auto out_meta = to_meta(out); |
11189 | at::AutoDispatchSkipFunctionalize func_guard; |
11190 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11191 | at::_ops::cross_out::call(self_meta, other_meta, dim, out_meta); |
11192 | } |
11193 | |
11194 | at::Tensor self_; |
11195 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11196 | at::functionalization::impl::sync(self); |
11197 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11198 | } else { |
11199 | self_ = self; |
11200 | } |
11201 | |
11202 | at::Tensor other_; |
11203 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
11204 | at::functionalization::impl::sync(other); |
11205 | other_ = at::functionalization::impl::from_functional_tensor(other); |
11206 | } else { |
11207 | other_ = other; |
11208 | } |
11209 | |
11210 | at::Tensor out_; |
11211 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
11212 | at::functionalization::impl::sync(out); |
11213 | out_ = at::functionalization::impl::from_functional_tensor(out); |
11214 | } else { |
11215 | out_ = out; |
11216 | } |
11217 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
11218 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
11219 |           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11220 | TORCH_INTERNAL_ASSERT(false, |
11221 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11222 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11223 | } else { |
11224 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11225 | at::AutoDispatchSkipFunctionalize guard; |
11226 | at::Tensor tmp_output = at::_ops::cross_out::call(self_, other_, dim, out_); |
11227 |           return out;
11228 | } |
11229 | } else { |
11230 | at::Tensor tmp_output; |
11231 | { |
11232 | at::AutoDispatchSkipFunctionalize guard; |
11233 | tmp_output = at::_ops::cross::call(self_, other_, dim); |
11234 | } |
11235 | at::functionalization::impl::replace_(out, tmp_output); |
11236 | at::functionalization::impl::commit_update(out); |
11237 | at::functionalization::impl::sync(out); |
11238 | return out; |
11239 | } |
11240 | } |
11241 | |
11242 | at::Tensor & trace_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
11243 | if (false) { |
11244 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11245 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
11246 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11247 | auto self_meta = to_meta(self); |
11248 | auto out_meta = to_meta(out); |
11249 | at::AutoDispatchSkipFunctionalize func_guard; |
11250 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11251 | at::_ops::trace_out::call(self_meta, out_meta); |
11252 | } |
11253 | |
11254 | at::Tensor self_; |
11255 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11256 | at::functionalization::impl::sync(self); |
11257 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11258 | } else { |
11259 | self_ = self; |
11260 | } |
11261 | |
11262 | at::Tensor out_; |
11263 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
11264 | at::functionalization::impl::sync(out); |
11265 | out_ = at::functionalization::impl::from_functional_tensor(out); |
11266 | } else { |
11267 | out_ = out; |
11268 | } |
11269 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
11270 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
11271 |           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11272 | TORCH_INTERNAL_ASSERT(false, |
11273 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11274 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11275 | } else { |
11276 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11277 | at::AutoDispatchSkipFunctionalize guard; |
11278 | at::Tensor tmp_output = at::_ops::trace_out::call(self_, out_); |
11279 |           return out;
11280 | } |
11281 | } else { |
11282 | at::Tensor tmp_output; |
11283 | { |
11284 | at::AutoDispatchSkipFunctionalize guard; |
11285 | tmp_output = at::_ops::trace::call(self_); |
11286 | } |
11287 | at::functionalization::impl::replace_(out, tmp_output); |
11288 | at::functionalization::impl::commit_update(out); |
11289 | at::functionalization::impl::sync(out); |
11290 | return out; |
11291 | } |
11292 | } |
11293 | |
11294 | at::Tensor & take_along_dim_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim, at::Tensor & out) { |
11295 | if (false) { |
11296 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11297 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
11298 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11299 | auto self_meta = to_meta(self); |
11300 | auto indices_meta = to_meta(indices); |
11301 | auto out_meta = to_meta(out); |
11302 | at::AutoDispatchSkipFunctionalize func_guard; |
11303 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11304 | at::_ops::take_along_dim_out::call(self_meta, indices_meta, dim, out_meta); |
11305 | } |
11306 | |
11307 | at::Tensor self_; |
11308 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11309 | at::functionalization::impl::sync(self); |
11310 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11311 | } else { |
11312 | self_ = self; |
11313 | } |
11314 | |
11315 | at::Tensor indices_; |
11316 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
11317 | at::functionalization::impl::sync(indices); |
11318 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
11319 | } else { |
11320 | indices_ = indices; |
11321 | } |
11322 | |
11323 | at::Tensor out_; |
11324 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
11325 | at::functionalization::impl::sync(out); |
11326 | out_ = at::functionalization::impl::from_functional_tensor(out); |
11327 | } else { |
11328 | out_ = out; |
11329 | } |
11330 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
11331 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(indices))) { |
11332 |           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11333 | TORCH_INTERNAL_ASSERT(false, |
11334 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11335 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11336 | } else { |
11337 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11338 | at::AutoDispatchSkipFunctionalize guard; |
11339 | at::Tensor tmp_output = at::_ops::take_along_dim_out::call(self_, indices_, dim, out_); |
11340 |           return out;
11341 | } |
11342 | } else { |
11343 | at::Tensor tmp_output; |
11344 | { |
11345 | at::AutoDispatchSkipFunctionalize guard; |
11346 | tmp_output = at::_ops::take_along_dim::call(self_, indices_, dim); |
11347 | } |
11348 | at::functionalization::impl::replace_(out, tmp_output); |
11349 | at::functionalization::impl::commit_update(out); |
11350 | at::functionalization::impl::sync(out); |
11351 | return out; |
11352 | } |
11353 | } |
11354 | |
11355 | at::Tensor & index_select_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out) { |
11356 | if (false) { |
11357 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11358 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
11359 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11360 | auto self_meta = to_meta(self); |
11361 | auto index_meta = to_meta(index); |
11362 | auto out_meta = to_meta(out); |
11363 | at::AutoDispatchSkipFunctionalize func_guard; |
11364 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11365 | at::_ops::index_select_out::call(self_meta, dim, index_meta, out_meta); |
11366 | } |
11367 | |
11368 | at::Tensor self_; |
11369 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11370 | at::functionalization::impl::sync(self); |
11371 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11372 | } else { |
11373 | self_ = self; |
11374 | } |
11375 | |
11376 | at::Tensor index_; |
11377 | if (at::functionalization::impl::isFunctionalTensor(index)) { |
11378 | at::functionalization::impl::sync(index); |
11379 | index_ = at::functionalization::impl::from_functional_tensor(index); |
11380 | } else { |
11381 | index_ = index; |
11382 | } |
11383 | |
11384 | at::Tensor out_; |
11385 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
11386 | at::functionalization::impl::sync(out); |
11387 | out_ = at::functionalization::impl::from_functional_tensor(out); |
11388 | } else { |
11389 | out_ = out; |
11390 | } |
11391 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
11392 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index))) { |
11393 |           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11394 | TORCH_INTERNAL_ASSERT(false, |
11395 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11396 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11397 | } else { |
11398 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11399 | at::AutoDispatchSkipFunctionalize guard; |
11400 | at::Tensor tmp_output = at::_ops::index_select_out::call(self_, dim, index_, out_); |
11401 |           return out;
11402 | } |
11403 | } else { |
11404 | at::Tensor tmp_output; |
11405 | { |
11406 | at::AutoDispatchSkipFunctionalize guard; |
11407 | tmp_output = at::_ops::index_select::call(self_, dim, index_); |
11408 | } |
11409 | at::functionalization::impl::replace_(out, tmp_output); |
11410 | at::functionalization::impl::commit_update(out); |
11411 | at::functionalization::impl::sync(out); |
11412 | return out; |
11413 | } |
11414 | } |
11415 | |
11416 | at::Tensor & index_select_out_dimname_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, at::Tensor & out) { |
11417 | if (false) { |
11418 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11419 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
11420 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11421 | auto self_meta = to_meta(self); |
11422 | auto index_meta = to_meta(index); |
11423 | auto out_meta = to_meta(out); |
11424 | at::AutoDispatchSkipFunctionalize func_guard; |
11425 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11426 | at::_ops::index_select_dimname_out::call(self_meta, dim, index_meta, out_meta); |
11427 | } |
11428 | |
11429 | at::Tensor self_; |
11430 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11431 | at::functionalization::impl::sync(self); |
11432 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11433 | } else { |
11434 | self_ = self; |
11435 | } |
11436 | |
11437 | at::Tensor index_; |
11438 | if (at::functionalization::impl::isFunctionalTensor(index)) { |
11439 | at::functionalization::impl::sync(index); |
11440 | index_ = at::functionalization::impl::from_functional_tensor(index); |
11441 | } else { |
11442 | index_ = index; |
11443 | } |
11444 | |
11445 | at::Tensor out_; |
11446 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
11447 | at::functionalization::impl::sync(out); |
11448 | out_ = at::functionalization::impl::from_functional_tensor(out); |
11449 | } else { |
11450 | out_ = out; |
11451 | } |
11452 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
11453 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index))) { |
11454 |           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11455 | TORCH_INTERNAL_ASSERT(false, |
11456 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11457 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11458 | } else { |
11459 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11460 | at::AutoDispatchSkipFunctionalize guard; |
11461 | at::Tensor tmp_output = at::_ops::index_select_dimname_out::call(self_, dim, index_, out_); |
11462 |           return out;
11463 | } |
11464 | } else { |
11465 | at::Tensor tmp_output; |
11466 | { |
11467 | at::AutoDispatchSkipFunctionalize guard; |
11468 | tmp_output = at::_ops::index_select_dimname::call(self_, dim, index_); |
11469 | } |
11470 | at::functionalization::impl::replace_(out, tmp_output); |
11471 | at::functionalization::impl::commit_update(out); |
11472 | at::functionalization::impl::sync(out); |
11473 | return out; |
11474 | } |
11475 | } |
11476 | |
11477 | at::Tensor & masked_select_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, at::Tensor & out) { |
11478 | if (false) { |
11479 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11480 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
11481 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11482 | auto self_meta = to_meta(self); |
11483 | auto mask_meta = to_meta(mask); |
11484 | auto out_meta = to_meta(out); |
11485 | at::AutoDispatchSkipFunctionalize func_guard; |
11486 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11487 | at::_ops::masked_select_out::call(self_meta, mask_meta, out_meta); |
11488 | } |
11489 | |
11490 | at::Tensor self_; |
11491 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11492 | at::functionalization::impl::sync(self); |
11493 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11494 | } else { |
11495 | self_ = self; |
11496 | } |
11497 | |
11498 | at::Tensor mask_; |
11499 | if (at::functionalization::impl::isFunctionalTensor(mask)) { |
11500 | at::functionalization::impl::sync(mask); |
11501 | mask_ = at::functionalization::impl::from_functional_tensor(mask); |
11502 | } else { |
11503 | mask_ = mask; |
11504 | } |
11505 | |
11506 | at::Tensor out_; |
11507 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
11508 | at::functionalization::impl::sync(out); |
11509 | out_ = at::functionalization::impl::from_functional_tensor(out); |
11510 | } else { |
11511 | out_ = out; |
11512 | } |
11513 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
11514 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mask))) { |
11515 |           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11516 | TORCH_INTERNAL_ASSERT(false, |
11517 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11518 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11519 | } else { |
11520 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11521 | at::AutoDispatchSkipFunctionalize guard; |
11522 | at::Tensor tmp_output = at::_ops::masked_select_out::call(self_, mask_, out_); |
11523 |           return out;
11524 | } |
11525 | } else { |
11526 | at::Tensor tmp_output; |
11527 | { |
11528 | at::AutoDispatchSkipFunctionalize guard; |
11529 | tmp_output = at::_ops::masked_select::call(self_, mask_); |
11530 | } |
11531 | at::functionalization::impl::replace_(out, tmp_output); |
11532 | at::functionalization::impl::commit_update(out); |
11533 | at::functionalization::impl::sync(out); |
11534 | return out; |
11535 | } |
11536 | } |
11537 | |
11538 | at::Tensor & linalg_solve_triangular_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular, at::Tensor & out) { |
11539 | if (false) { |
11540 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11541 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
11542 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11543 | auto self_meta = to_meta(self); |
11544 | auto B_meta = to_meta(B); |
11545 | auto out_meta = to_meta(out); |
11546 | at::AutoDispatchSkipFunctionalize func_guard; |
11547 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11548 | at::_ops::linalg_solve_triangular_out::call(self_meta, B_meta, upper, left, unitriangular, out_meta); |
11549 | } |
11550 | |
11551 | at::Tensor self_; |
11552 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11553 | at::functionalization::impl::sync(self); |
11554 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11555 | } else { |
11556 | self_ = self; |
11557 | } |
11558 | |
11559 | at::Tensor B_; |
11560 | if (at::functionalization::impl::isFunctionalTensor(B)) { |
11561 | at::functionalization::impl::sync(B); |
11562 | B_ = at::functionalization::impl::from_functional_tensor(B); |
11563 | } else { |
11564 | B_ = B; |
11565 | } |
11566 | |
11567 | at::Tensor out_; |
11568 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
11569 | at::functionalization::impl::sync(out); |
11570 | out_ = at::functionalization::impl::from_functional_tensor(out); |
11571 | } else { |
11572 | out_ = out; |
11573 | } |
11574 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
11575 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(B))) { |
11576 |           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11577 | TORCH_INTERNAL_ASSERT(false, |
11578 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11579 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11580 | } else { |
11581 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11582 | at::AutoDispatchSkipFunctionalize guard; |
11583 | at::Tensor tmp_output = at::_ops::linalg_solve_triangular_out::call(self_, B_, upper, left, unitriangular, out_); |
11584 |           return out;
11585 | } |
11586 | } else { |
11587 | at::Tensor tmp_output; |
11588 | { |
11589 | at::AutoDispatchSkipFunctionalize guard; |
11590 | tmp_output = at::_ops::linalg_solve_triangular::call(self_, B_, upper, left, unitriangular); |
11591 | } |
11592 | at::functionalization::impl::replace_(out, tmp_output); |
11593 | at::functionalization::impl::commit_update(out); |
11594 | at::functionalization::impl::sync(out); |
11595 | return out; |
11596 | } |
11597 | } |
11598 | |
11599 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> svd_out_U(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool some, bool compute_uv, at::Tensor & U, at::Tensor & S, at::Tensor & V) { |
11600 | if (false) { |
11601 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11602 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
11603 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11604 | auto self_meta = to_meta(self); |
11605 | auto U_meta = to_meta(U); |
11606 | auto S_meta = to_meta(S); |
11607 | auto V_meta = to_meta(V); |
11608 | at::AutoDispatchSkipFunctionalize func_guard; |
11609 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11610 | at::_ops::svd_U::call(self_meta, some, compute_uv, U_meta, S_meta, V_meta); |
11611 | } |
11612 | |
11613 | at::Tensor self_; |
11614 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11615 | at::functionalization::impl::sync(self); |
11616 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11617 | } else { |
11618 | self_ = self; |
11619 | } |
11620 | |
11621 | at::Tensor U_; |
11622 | if (at::functionalization::impl::isFunctionalTensor(U)) { |
11623 | at::functionalization::impl::sync(U); |
11624 | U_ = at::functionalization::impl::from_functional_tensor(U); |
11625 | } else { |
11626 | U_ = U; |
11627 | } |
11628 | |
11629 | at::Tensor S_; |
11630 | if (at::functionalization::impl::isFunctionalTensor(S)) { |
11631 | at::functionalization::impl::sync(S); |
11632 | S_ = at::functionalization::impl::from_functional_tensor(S); |
11633 | } else { |
11634 | S_ = S; |
11635 | } |
11636 | |
11637 | at::Tensor V_; |
11638 | if (at::functionalization::impl::isFunctionalTensor(V)) { |
11639 | at::functionalization::impl::sync(V); |
11640 | V_ = at::functionalization::impl::from_functional_tensor(V); |
11641 | } else { |
11642 | V_ = V; |
11643 | } |
11644 | if (!(true && at::functionalization::impl::isFunctionalTensor(U) && at::functionalization::impl::isFunctionalTensor(S) && at::functionalization::impl::isFunctionalTensor(V))) { |
11645 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
11646 |           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11647 | TORCH_INTERNAL_ASSERT(false, |
11648 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11649 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11650 | } else { |
11651 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11652 | at::AutoDispatchSkipFunctionalize guard; |
11653 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::svd_U::call(self_, some, compute_uv, U_, S_, V_); |
11654 |           return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(U, S, V);
11655 | } |
11656 | } else { |
11657 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output; |
11658 | { |
11659 | at::AutoDispatchSkipFunctionalize guard; |
11660 | tmp_output = at::_ops::svd::call(self_, some, compute_uv); |
11661 | } |
11662 | at::functionalization::impl::replace_(U, std::get<0>(tmp_output)); |
11663 | at::functionalization::impl::commit_update(U); |
11664 | at::functionalization::impl::sync(U); |
11665 | at::functionalization::impl::replace_(S, std::get<1>(tmp_output)); |
11666 | at::functionalization::impl::commit_update(S); |
11667 | at::functionalization::impl::sync(S); |
11668 | at::functionalization::impl::replace_(V, std::get<2>(tmp_output)); |
11669 | at::functionalization::impl::commit_update(V); |
11670 | at::functionalization::impl::sync(V); |
11671 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(U, S, V); |
11672 | } |
11673 | } |
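
    // Editorial note (not emitted by torchgen): multi-output out= ops such as svd.U above follow
    // the same template as the single-output wrappers, except that every output (U, S, V) is
    // unwrapped independently and, on the functional path, each receives its own
    // replace_/commit_update/sync from the corresponding element of the functional op's result tuple.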
11674 | |
11675 | at::Tensor & multinomial_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t num_samples, bool replacement, c10::optional<at::Generator> generator, at::Tensor & out) { |
11676 | if (false) { |
11677 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11678 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
11679 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11680 | auto self_meta = to_meta(self); |
11681 | auto out_meta = to_meta(out); |
11682 | at::AutoDispatchSkipFunctionalize func_guard; |
11683 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11684 | at::_ops::multinomial_out::call(self_meta, num_samples, replacement, generator, out_meta); |
11685 | } |
11686 | |
11687 | at::Tensor self_; |
11688 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11689 | at::functionalization::impl::sync(self); |
11690 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11691 | } else { |
11692 | self_ = self; |
11693 | } |
11694 | |
11695 | at::Tensor out_; |
11696 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
11697 | at::functionalization::impl::sync(out); |
11698 | out_ = at::functionalization::impl::from_functional_tensor(out); |
11699 | } else { |
11700 | out_ = out; |
11701 | } |
11702 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
11703 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
11704 |           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11705 | TORCH_INTERNAL_ASSERT(false, |
11706 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11707 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11708 | } else { |
11709 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11710 | at::AutoDispatchSkipFunctionalize guard; |
11711 | at::Tensor tmp_output = at::_ops::multinomial_out::call(self_, num_samples, replacement, generator, out_); |
11712 |           return out;
11713 | } |
11714 | } else { |
11715 | at::Tensor tmp_output; |
11716 | { |
11717 | at::AutoDispatchSkipFunctionalize guard; |
11718 | tmp_output = at::_ops::multinomial::call(self_, num_samples, replacement, generator); |
11719 | } |
11720 | at::functionalization::impl::replace_(out, tmp_output); |
11721 | at::functionalization::impl::commit_update(out); |
11722 | at::functionalization::impl::sync(out); |
11723 | return out; |
11724 | } |
11725 | } |
11726 | |
11727 | ::std::tuple<at::Tensor &,at::Tensor &> histogram_out_bins_tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & bins, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges) { |
11728 | if (false) { |
11729 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11730 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
11731 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11732 | auto self_meta = to_meta(self); |
11733 | auto bins_meta = to_meta(bins); |
11734 | auto weight_meta = to_meta(weight); |
11735 | auto hist_meta = to_meta(hist); |
11736 | auto bin_edges_meta = to_meta(bin_edges); |
11737 | at::AutoDispatchSkipFunctionalize func_guard; |
11738 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11739 | at::_ops::histogram_bins_tensor_out::call(self_meta, bins_meta, weight_meta, density, hist_meta, bin_edges_meta); |
11740 | } |
11741 | |
11742 | at::Tensor self_; |
11743 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11744 | at::functionalization::impl::sync(self); |
11745 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11746 | } else { |
11747 | self_ = self; |
11748 | } |
11749 | |
11750 | at::Tensor bins_; |
11751 | if (at::functionalization::impl::isFunctionalTensor(bins)) { |
11752 | at::functionalization::impl::sync(bins); |
11753 | bins_ = at::functionalization::impl::from_functional_tensor(bins); |
11754 | } else { |
11755 | bins_ = bins; |
11756 | } |
11757 | |
11758 | c10::optional<at::Tensor> weight_; |
11759 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
11760 | at::functionalization::impl::sync(weight); |
11761 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
11762 | } else { |
11763 | weight_ = weight; |
11764 | } |
11765 | |
11766 | at::Tensor hist_; |
11767 | if (at::functionalization::impl::isFunctionalTensor(hist)) { |
11768 | at::functionalization::impl::sync(hist); |
11769 | hist_ = at::functionalization::impl::from_functional_tensor(hist); |
11770 | } else { |
11771 | hist_ = hist; |
11772 | } |
11773 | |
11774 | at::Tensor bin_edges_; |
11775 | if (at::functionalization::impl::isFunctionalTensor(bin_edges)) { |
11776 | at::functionalization::impl::sync(bin_edges); |
11777 | bin_edges_ = at::functionalization::impl::from_functional_tensor(bin_edges); |
11778 | } else { |
11779 | bin_edges_ = bin_edges; |
11780 | } |
11781 | if (!(true && at::functionalization::impl::isFunctionalTensor(hist) && at::functionalization::impl::isFunctionalTensor(bin_edges))) { |
11782 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(bins) || at::functionalization::impl::isFunctionalTensor(weight))) { |
11783 |           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11784 | TORCH_INTERNAL_ASSERT(false, |
11785 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11786 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11787 | } else { |
11788 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11789 | at::AutoDispatchSkipFunctionalize guard; |
11790 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::histogram_bins_tensor_out::call(self_, bins_, weight_, density, hist_, bin_edges_); |
11791 |           return ::std::tuple<at::Tensor &,at::Tensor &>(hist, bin_edges);
11792 | } |
11793 | } else { |
11794 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
11795 | { |
11796 | at::AutoDispatchSkipFunctionalize guard; |
11797 | tmp_output = at::_ops::histogram_bins_tensor::call(self_, bins_, weight_, density); |
11798 | } |
11799 | at::functionalization::impl::replace_(hist, std::get<0>(tmp_output)); |
11800 | at::functionalization::impl::commit_update(hist); |
11801 | at::functionalization::impl::sync(hist); |
11802 | at::functionalization::impl::replace_(bin_edges, std::get<1>(tmp_output)); |
11803 | at::functionalization::impl::commit_update(bin_edges); |
11804 | at::functionalization::impl::sync(bin_edges); |
11805 | return ::std::tuple<at::Tensor &,at::Tensor &>(hist, bin_edges); |
11806 | } |
11807 | } |
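
    // Editorial note (not emitted by torchgen): optional tensor arguments such as `weight` in the
    // histogram overloads above go through the c10::optional<Tensor> overloads of
    // isFunctionalTensor/sync/from_functional_tensor, so an absent weight passes through as
    // c10::nullopt while a present functional weight is unwrapped exactly like a required argument.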
11808 | |
11809 | ::std::tuple<at::Tensor &,at::Tensor &> histogram_out_bin_ct_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges) { |
11810 | if (false) { |
11811 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11812 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
11813 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11814 | auto self_meta = to_meta(self); |
11815 | auto weight_meta = to_meta(weight); |
11816 | auto hist_meta = to_meta(hist); |
11817 | auto bin_edges_meta = to_meta(bin_edges); |
11818 | at::AutoDispatchSkipFunctionalize func_guard; |
11819 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11820 | at::_ops::histogram_bin_ct_out::call(self_meta, bins, range, weight_meta, density, hist_meta, bin_edges_meta); |
11821 | } |
11822 | |
11823 | at::Tensor self_; |
11824 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11825 | at::functionalization::impl::sync(self); |
11826 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11827 | } else { |
11828 | self_ = self; |
11829 | } |
11830 | |
11831 | c10::optional<at::Tensor> weight_; |
11832 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
11833 | at::functionalization::impl::sync(weight); |
11834 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
11835 | } else { |
11836 | weight_ = weight; |
11837 | } |
11838 | |
11839 | at::Tensor hist_; |
11840 | if (at::functionalization::impl::isFunctionalTensor(hist)) { |
11841 | at::functionalization::impl::sync(hist); |
11842 | hist_ = at::functionalization::impl::from_functional_tensor(hist); |
11843 | } else { |
11844 | hist_ = hist; |
11845 | } |
11846 | |
11847 | at::Tensor bin_edges_; |
11848 | if (at::functionalization::impl::isFunctionalTensor(bin_edges)) { |
11849 | at::functionalization::impl::sync(bin_edges); |
11850 | bin_edges_ = at::functionalization::impl::from_functional_tensor(bin_edges); |
11851 | } else { |
11852 | bin_edges_ = bin_edges; |
11853 | } |
11854 | if (!(true && at::functionalization::impl::isFunctionalTensor(hist) && at::functionalization::impl::isFunctionalTensor(bin_edges))) { |
11855 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight))) { |
11856 |           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11857 | TORCH_INTERNAL_ASSERT(false, |
11858 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11859 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11860 | } else { |
11861 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11862 | at::AutoDispatchSkipFunctionalize guard; |
11863 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::histogram_bin_ct_out::call(self_, bins, range, weight_, density, hist_, bin_edges_); |
11864 |           return ::std::tuple<at::Tensor &,at::Tensor &>(hist, bin_edges);
11865 | } |
11866 | } else { |
11867 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
11868 | { |
11869 | at::AutoDispatchSkipFunctionalize guard; |
11870 | tmp_output = at::_ops::histogram_bin_ct::call(self_, bins, range, weight_, density); |
11871 | } |
11872 | at::functionalization::impl::replace_(hist, std::get<0>(tmp_output)); |
11873 | at::functionalization::impl::commit_update(hist); |
11874 | at::functionalization::impl::sync(hist); |
11875 | at::functionalization::impl::replace_(bin_edges, std::get<1>(tmp_output)); |
11876 | at::functionalization::impl::commit_update(bin_edges); |
11877 | at::functionalization::impl::sync(bin_edges); |
11878 | return ::std::tuple<at::Tensor &,at::Tensor &>(hist, bin_edges); |
11879 | } |
11880 | } |
11881 | |
11882 | at::Tensor & igammac_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
11883 | if (false) { |
11884 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11885 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
11886 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11887 | auto self_meta = to_meta(self); |
11888 | auto other_meta = to_meta(other); |
11889 | auto out_meta = to_meta(out); |
11890 | at::AutoDispatchSkipFunctionalize func_guard; |
11891 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11892 | at::_ops::igammac_out::call(self_meta, other_meta, out_meta); |
11893 | } |
11894 | |
11895 | at::Tensor self_; |
11896 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11897 | at::functionalization::impl::sync(self); |
11898 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11899 | } else { |
11900 | self_ = self; |
11901 | } |
11902 | |
11903 | at::Tensor other_; |
11904 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
11905 | at::functionalization::impl::sync(other); |
11906 | other_ = at::functionalization::impl::from_functional_tensor(other); |
11907 | } else { |
11908 | other_ = other; |
11909 | } |
11910 | |
11911 | at::Tensor out_; |
11912 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
11913 | at::functionalization::impl::sync(out); |
11914 | out_ = at::functionalization::impl::from_functional_tensor(out); |
11915 | } else { |
11916 | out_ = out; |
11917 | } |
11918 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
11919 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
11920 |           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11921 | TORCH_INTERNAL_ASSERT(false, |
11922 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11923 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11924 | } else { |
11925 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11926 | at::AutoDispatchSkipFunctionalize guard; |
11927 | at::Tensor tmp_output = at::_ops::igammac_out::call(self_, other_, out_); |
11928 |           return out;
11929 | } |
11930 | } else { |
11931 | at::Tensor tmp_output; |
11932 | { |
11933 | at::AutoDispatchSkipFunctionalize guard; |
11934 | tmp_output = at::_ops::igammac::call(self_, other_); |
11935 | } |
11936 | at::functionalization::impl::replace_(out, tmp_output); |
11937 | at::functionalization::impl::commit_update(out); |
11938 | at::functionalization::impl::sync(out); |
11939 | return out; |
11940 | } |
11941 | } |
11942 | |
11943 | at::Tensor & igammac_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { |
11944 | if (true) { |
11945 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11946 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
11947 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11948 | auto self_meta = to_meta(self); |
11949 | auto other_meta = to_meta(other); |
11950 | at::AutoDispatchSkipFunctionalize func_guard; |
11951 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11952 | at::_ops::igammac_::call(self_meta, other_meta); |
11953 | } |
11954 | |
11955 | at::Tensor self_; |
11956 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11957 | at::functionalization::impl::sync(self); |
11958 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11959 | } else { |
11960 | self_ = self; |
11961 | } |
11962 | |
11963 | at::Tensor other_; |
11964 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
11965 | at::functionalization::impl::sync(other); |
11966 | other_ = at::functionalization::impl::from_functional_tensor(other); |
11967 | } else { |
11968 | other_ = other; |
11969 | } |
11970 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
11971 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
11972 |           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11973 | TORCH_INTERNAL_ASSERT(false, |
11974 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11975 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11976 | } else { |
11977 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11978 | at::AutoDispatchSkipFunctionalize guard; |
11979 | at::Tensor tmp_output = at::_ops::igammac_::call(self_, other_); |
11980 |           return self;
11981 | } |
11982 | } else { |
11983 | at::Tensor tmp_output; |
11984 | { |
11985 | at::AutoDispatchSkipFunctionalize guard; |
11986 | tmp_output = at::_ops::igammac::call(self_, other_); |
11987 | } |
11988 | at::functionalization::impl::replace_(self, tmp_output); |
11989 | at::functionalization::impl::commit_update(self); |
11990 | at::functionalization::impl::sync(self); |
11991 | return self; |
11992 | } |
11993 | } |
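
    // The sketch below is an editorial illustration (kept inside #if 0, so it has no effect on this
    // translation unit) of roughly how kernels like igammac_ above get exercised from C++: inputs
    // are wrapped into FunctionalTensorWrapper, the Functionalize dispatch key is enabled, and the
    // in-place call is then routed through the wrapper, which runs the functional igammac and
    // commits the result back. In practice this plumbing is usually driven by functorch's
    // functionalize() rather than written by hand.
#if 0
    void functionalize_igammac_example() {
      at::Tensor a = at::rand({3});
      at::Tensor b = at::rand({3});
      // Wrap the inputs so that mutations are recorded instead of applied to the original storage.
      at::Tensor a_func = at::functionalization::impl::to_functional_tensor(a);
      at::Tensor b_func = at::functionalization::impl::to_functional_tensor(b);
      c10::impl::IncludeDispatchKeyGuard guard(c10::DispatchKey::Functionalize);
      a_func.igammac_(b_func);  // dispatches to the igammac_ wrapper above
      at::functionalization::impl::sync(a_func);
      // Read back a plain tensor holding the committed result.
      at::Tensor result = at::functionalization::impl::from_functional_tensor(a_func);
      (void)result;
    }
#endif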
11994 | |
11995 | at::Tensor & remainder_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
11996 | if (false) { |
11997 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11998 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
11999 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12000 | auto self_meta = to_meta(self); |
12001 | auto out_meta = to_meta(out); |
12002 | at::AutoDispatchSkipFunctionalize func_guard; |
12003 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12004 | at::_ops::remainder_Scalar_out::call(self_meta, other, out_meta); |
12005 | } |
12006 | |
12007 | at::Tensor self_; |
12008 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12009 | at::functionalization::impl::sync(self); |
12010 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12011 | } else { |
12012 | self_ = self; |
12013 | } |
12014 | |
12015 | at::Tensor out_; |
12016 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
12017 | at::functionalization::impl::sync(out); |
12018 | out_ = at::functionalization::impl::from_functional_tensor(out); |
12019 | } else { |
12020 | out_ = out; |
12021 | } |
12022 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
12023 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
12024 |           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
12025 | TORCH_INTERNAL_ASSERT(false, |
12026 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12027 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12028 | } else { |
12029 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12030 | at::AutoDispatchSkipFunctionalize guard; |
12031 | at::Tensor tmp_output = at::_ops::remainder_Scalar_out::call(self_, other, out_); |
12032 |           return out;
12033 | } |
12034 | } else { |
12035 | at::Tensor tmp_output; |
12036 | { |
12037 | at::AutoDispatchSkipFunctionalize guard; |
12038 | tmp_output = at::_ops::remainder_Scalar::call(self_, other); |
12039 | } |
12040 | at::functionalization::impl::replace_(out, tmp_output); |
12041 | at::functionalization::impl::commit_update(out); |
12042 | at::functionalization::impl::sync(out); |
12043 | return out; |
12044 | } |
12045 | } |
12046 | |
12047 | at::Tensor & remainder__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { |
12048 | if (true) { |
12049 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12050 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
12051 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12052 | auto self_meta = to_meta(self); |
12053 | at::AutoDispatchSkipFunctionalize func_guard; |
12054 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12055 | at::_ops::remainder__Scalar::call(self_meta, other); |
12056 | } |
12057 | |
12058 | at::Tensor self_; |
12059 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12060 | at::functionalization::impl::sync(self); |
12061 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12062 | } else { |
12063 | self_ = self; |
12064 | } |
12065 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
12066 | if ((false)) { |
12067 |           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
12068 | TORCH_INTERNAL_ASSERT(false, |
12069 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12070 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12071 | } else { |
12072 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12073 | at::AutoDispatchSkipFunctionalize guard; |
12074 | at::Tensor tmp_output = at::_ops::remainder__Scalar::call(self_, other); |
12075 |           return self;
12076 | } |
12077 | } else { |
12078 | at::Tensor tmp_output; |
12079 | { |
12080 | at::AutoDispatchSkipFunctionalize guard; |
12081 | tmp_output = at::_ops::remainder_Scalar::call(self_, other); |
12082 | } |
12083 | at::functionalization::impl::replace_(self, tmp_output); |
12084 | at::functionalization::impl::commit_update(self); |
12085 | at::functionalization::impl::sync(self); |
12086 | return self; |
12087 | } |
12088 | } |
12089 | |
12090 | at::Tensor & remainder_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
12091 | if (false) { |
12092 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12093 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
12094 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12095 | auto self_meta = to_meta(self); |
12096 | auto other_meta = to_meta(other); |
12097 | auto out_meta = to_meta(out); |
12098 | at::AutoDispatchSkipFunctionalize func_guard; |
12099 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12100 | at::_ops::remainder_Tensor_out::call(self_meta, other_meta, out_meta); |
12101 | } |
12102 | |
12103 | at::Tensor self_; |
12104 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12105 | at::functionalization::impl::sync(self); |
12106 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12107 | } else { |
12108 | self_ = self; |
12109 | } |
12110 | |
12111 | at::Tensor other_; |
12112 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
12113 | at::functionalization::impl::sync(other); |
12114 | other_ = at::functionalization::impl::from_functional_tensor(other); |
12115 | } else { |
12116 | other_ = other; |
12117 | } |
12118 | |
12119 | at::Tensor out_; |
12120 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
12121 | at::functionalization::impl::sync(out); |
12122 | out_ = at::functionalization::impl::from_functional_tensor(out); |
12123 | } else { |
12124 | out_ = out; |
12125 | } |
12126 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
12127 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
12128 |           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
12129 | TORCH_INTERNAL_ASSERT(false, |
12130 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12131 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12132 | } else { |
12133 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12134 | at::AutoDispatchSkipFunctionalize guard; |
12135 | at::Tensor tmp_output = at::_ops::remainder_Tensor_out::call(self_, other_, out_); |
12136 |           return out;
12137 | } |
12138 | } else { |
12139 | at::Tensor tmp_output; |
12140 | { |
12141 | at::AutoDispatchSkipFunctionalize guard; |
12142 | tmp_output = at::_ops::remainder_Tensor::call(self_, other_); |
12143 | } |
12144 | at::functionalization::impl::replace_(out, tmp_output); |
12145 | at::functionalization::impl::commit_update(out); |
12146 | at::functionalization::impl::sync(out); |
12147 | return out; |
12148 | } |
12149 | } |
12150 | |
12151 | at::Tensor & remainder__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { |
12152 | if (true) { |
12153 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12154 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
12155 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12156 | auto self_meta = to_meta(self); |
12157 | auto other_meta = to_meta(other); |
12158 | at::AutoDispatchSkipFunctionalize func_guard; |
12159 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12160 | at::_ops::remainder__Tensor::call(self_meta, other_meta); |
12161 | } |
12162 | |
12163 | at::Tensor self_; |
12164 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12165 | at::functionalization::impl::sync(self); |
12166 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12167 | } else { |
12168 | self_ = self; |
12169 | } |
12170 | |
12171 | at::Tensor other_; |
12172 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
12173 | at::functionalization::impl::sync(other); |
12174 | other_ = at::functionalization::impl::from_functional_tensor(other); |
12175 | } else { |
12176 | other_ = other; |
12177 | } |
12178 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
12179 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
12180 |           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
12181 | TORCH_INTERNAL_ASSERT(false, |
12182 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12183 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12184 | } else { |
12185 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12186 | at::AutoDispatchSkipFunctionalize guard; |
12187 | at::Tensor tmp_output = at::_ops::remainder__Tensor::call(self_, other_); |
12188 |           return self;
12189 | } |
12190 | } else { |
12191 | at::Tensor tmp_output; |
12192 | { |
12193 | at::AutoDispatchSkipFunctionalize guard; |
12194 | tmp_output = at::_ops::remainder_Tensor::call(self_, other_); |
12195 | } |
12196 | at::functionalization::impl::replace_(self, tmp_output); |
12197 | at::functionalization::impl::commit_update(self); |
12198 | at::functionalization::impl::sync(self); |
12199 | return self; |
12200 | } |
12201 | } |
12202 | |
12203 | at::Tensor & remainder_out_Scalar_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) { |
12204 | if (false) { |
12205 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12206 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
12207 |         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12208 | auto other_meta = to_meta(other); |
12209 | auto out_meta = to_meta(out); |
12210 | at::AutoDispatchSkipFunctionalize func_guard; |
12211 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12212 | at::_ops::remainder_Scalar_Tensor_out::call(self, other_meta, out_meta); |
12213 | } |
12214 | |
12215 | at::Tensor other_; |
12216 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
12217 | at::functionalization::impl::sync(other); |
12218 | other_ = at::functionalization::impl::from_functional_tensor(other); |
12219 | } else { |
12220 | other_ = other; |
12221 | } |
12222 | |
12223 | at::Tensor out_; |
12224 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
12225 | at::functionalization::impl::sync(out); |
12226 | out_ = at::functionalization::impl::from_functional_tensor(out); |
12227 | } else { |
12228 | out_ = out; |
12229 | } |
12230 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
12231 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
12232 |           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
12233 | TORCH_INTERNAL_ASSERT(false, |
12234 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12235 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12236 | } else { |
12237 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12238 | at::AutoDispatchSkipFunctionalize guard; |
12239 | at::Tensor tmp_output = at::_ops::remainder_Scalar_Tensor_out::call(self, other_, out_); |
12240 |           return out;
12241 | } |
12242 | } else { |
12243 | at::Tensor tmp_output; |
12244 | { |
12245 | at::AutoDispatchSkipFunctionalize guard; |
12246 | tmp_output = at::_ops::remainder_Scalar_Tensor::call(self, other_); |
12247 | } |
12248 | at::functionalization::impl::replace_(out, tmp_output); |
12249 | at::functionalization::impl::commit_update(out); |
12250 | at::functionalization::impl::sync(out); |
12251 | return out; |
12252 | } |
12253 | } |
12254 | |
12255 | at::Tensor & quantile_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) { |
12256 | if (false) { |
12257 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12258 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
12259 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
12260 | auto self_meta = to_meta(self); |
12261 | auto q_meta = to_meta(q); |
12262 | auto out_meta = to_meta(out); |
12263 | at::AutoDispatchSkipFunctionalize func_guard; |
12264 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12265 | at::_ops::quantile_out::call(self_meta, q_meta, dim, keepdim, interpolation, out_meta); |
12266 | } |
12267 | |
12268 | at::Tensor self_; |
12269 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12270 | at::functionalization::impl::sync(self); |
12271 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12272 | } else { |
12273 | self_ = self; |
12274 | } |
12275 | |
12276 | at::Tensor q_; |
12277 | if (at::functionalization::impl::isFunctionalTensor(q)) { |
12278 | at::functionalization::impl::sync(q); |
12279 | q_ = at::functionalization::impl::from_functional_tensor(q); |
12280 | } else { |
12281 | q_ = q; |
12282 | } |
12283 | |
12284 | at::Tensor out_; |
12285 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
12286 | at::functionalization::impl::sync(out); |
12287 | out_ = at::functionalization::impl::from_functional_tensor(out); |
12288 | } else { |
12289 | out_ = out; |
12290 | } |
12291 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
12292 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(q))) { |
12293 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
12294 | TORCH_INTERNAL_ASSERT(false, |
12295 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12296 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12297 | } else { |
12298 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12299 | at::AutoDispatchSkipFunctionalize guard; |
12300 | at::Tensor tmp_output = at::_ops::quantile_out::call(self_, q_, dim, keepdim, interpolation, out_); |
12301 | return out; |
12302 | } |
12303 | } else { |
12304 | at::Tensor tmp_output; |
12305 | { |
12306 | at::AutoDispatchSkipFunctionalize guard; |
12307 | tmp_output = at::_ops::quantile::call(self_, q_, dim, keepdim, interpolation); |
12308 | } |
12309 | at::functionalization::impl::replace_(out, tmp_output); |
12310 | at::functionalization::impl::commit_update(out); |
12311 | at::functionalization::impl::sync(out); |
12312 | return out; |
12313 | } |
12314 | } |
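
// The out= kernels follow the same outline, except that the functional path
// drops the `out` argument: the functional op (here at::_ops::quantile) is
// called with only the inputs, and its result is copied back into `out` with
// replace_()/commit_update()/sync(). Non-tensor arguments such as `dim`,
// `keepdim`, and `interpolation` are forwarded unchanged in both the
// redispatch and the functional call.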
12315 | |
12316 | at::Tensor & quantile_out_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) { |
12317 | if (false) { |
12318 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12319 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
12320 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
12321 | auto self_meta = to_meta(self); |
12322 | auto out_meta = to_meta(out); |
12323 | at::AutoDispatchSkipFunctionalize func_guard; |
12324 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12325 | at::_ops::quantile_scalar_out::call(self_meta, q, dim, keepdim, interpolation, out_meta); |
12326 | } |
12327 | |
12328 | at::Tensor self_; |
12329 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12330 | at::functionalization::impl::sync(self); |
12331 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12332 | } else { |
12333 | self_ = self; |
12334 | } |
12335 | |
12336 | at::Tensor out_; |
12337 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
12338 | at::functionalization::impl::sync(out); |
12339 | out_ = at::functionalization::impl::from_functional_tensor(out); |
12340 | } else { |
12341 | out_ = out; |
12342 | } |
12343 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
12344 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
12345 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
12346 | TORCH_INTERNAL_ASSERT(false, |
12347 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12348 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12349 | } else { |
12350 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12351 | at::AutoDispatchSkipFunctionalize guard; |
12352 | at::Tensor tmp_output = at::_ops::quantile_scalar_out::call(self_, q, dim, keepdim, interpolation, out_); |
12353 | return out; |
12354 | } |
12355 | } else { |
12356 | at::Tensor tmp_output; |
12357 | { |
12358 | at::AutoDispatchSkipFunctionalize guard; |
12359 | tmp_output = at::_ops::quantile_scalar::call(self_, q, dim, keepdim, interpolation); |
12360 | } |
12361 | at::functionalization::impl::replace_(out, tmp_output); |
12362 | at::functionalization::impl::commit_update(out); |
12363 | at::functionalization::impl::sync(out); |
12364 | return out; |
12365 | } |
12366 | } |
12367 | |
12368 | at::Tensor & nanquantile_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) { |
12369 | if (false) { |
12370 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12371 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
12372 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
12373 | auto self_meta = to_meta(self); |
12374 | auto q_meta = to_meta(q); |
12375 | auto out_meta = to_meta(out); |
12376 | at::AutoDispatchSkipFunctionalize func_guard; |
12377 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12378 | at::_ops::nanquantile_out::call(self_meta, q_meta, dim, keepdim, interpolation, out_meta); |
12379 | } |
12380 | |
12381 | at::Tensor self_; |
12382 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12383 | at::functionalization::impl::sync(self); |
12384 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12385 | } else { |
12386 | self_ = self; |
12387 | } |
12388 | |
12389 | at::Tensor q_; |
12390 | if (at::functionalization::impl::isFunctionalTensor(q)) { |
12391 | at::functionalization::impl::sync(q); |
12392 | q_ = at::functionalization::impl::from_functional_tensor(q); |
12393 | } else { |
12394 | q_ = q; |
12395 | } |
12396 | |
12397 | at::Tensor out_; |
12398 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
12399 | at::functionalization::impl::sync(out); |
12400 | out_ = at::functionalization::impl::from_functional_tensor(out); |
12401 | } else { |
12402 | out_ = out; |
12403 | } |
12404 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
12405 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(q))) { |
12406 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
12407 | TORCH_INTERNAL_ASSERT(false, |
12408 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12409 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12410 | } else { |
12411 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12412 | at::AutoDispatchSkipFunctionalize guard; |
12413 | at::Tensor tmp_output = at::_ops::nanquantile_out::call(self_, q_, dim, keepdim, interpolation, out_); |
12414 | return out; |
12415 | } |
12416 | } else { |
12417 | at::Tensor tmp_output; |
12418 | { |
12419 | at::AutoDispatchSkipFunctionalize guard; |
12420 | tmp_output = at::_ops::nanquantile::call(self_, q_, dim, keepdim, interpolation); |
12421 | } |
12422 | at::functionalization::impl::replace_(out, tmp_output); |
12423 | at::functionalization::impl::commit_update(out); |
12424 | at::functionalization::impl::sync(out); |
12425 | return out; |
12426 | } |
12427 | } |
12428 | |
12429 | at::Tensor & nanquantile_out_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) { |
12430 | if (false) { |
12431 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12432 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
12433 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
12434 | auto self_meta = to_meta(self); |
12435 | auto out_meta = to_meta(out); |
12436 | at::AutoDispatchSkipFunctionalize func_guard; |
12437 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12438 | at::_ops::nanquantile_scalar_out::call(self_meta, q, dim, keepdim, interpolation, out_meta); |
12439 | } |
12440 | |
12441 | at::Tensor self_; |
12442 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12443 | at::functionalization::impl::sync(self); |
12444 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12445 | } else { |
12446 | self_ = self; |
12447 | } |
12448 | |
12449 | at::Tensor out_; |
12450 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
12451 | at::functionalization::impl::sync(out); |
12452 | out_ = at::functionalization::impl::from_functional_tensor(out); |
12453 | } else { |
12454 | out_ = out; |
12455 | } |
12456 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
12457 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
12458 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
12459 | TORCH_INTERNAL_ASSERT(false, |
12460 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12461 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12462 | } else { |
12463 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12464 | at::AutoDispatchSkipFunctionalize guard; |
12465 | at::Tensor tmp_output = at::_ops::nanquantile_scalar_out::call(self_, q, dim, keepdim, interpolation, out_); |
12466 | return out; |
12467 | } |
12468 | } else { |
12469 | at::Tensor tmp_output; |
12470 | { |
12471 | at::AutoDispatchSkipFunctionalize guard; |
12472 | tmp_output = at::_ops::nanquantile_scalar::call(self_, q, dim, keepdim, interpolation); |
12473 | } |
12474 | at::functionalization::impl::replace_(out, tmp_output); |
12475 | at::functionalization::impl::commit_update(out); |
12476 | at::functionalization::impl::sync(out); |
12477 | return out; |
12478 | } |
12479 | } |
12480 | |
12481 | ::std::tuple<at::Tensor &,at::Tensor &> sort_out_values(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool descending, at::Tensor & values, at::Tensor & indices) { |
12482 | if (false) { |
12483 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12484 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
12485 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
12486 | auto self_meta = to_meta(self); |
12487 | auto values_meta = to_meta(values); |
12488 | auto indices_meta = to_meta(indices); |
12489 | at::AutoDispatchSkipFunctionalize func_guard; |
12490 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12491 | at::_ops::sort_values::call(self_meta, dim, descending, values_meta, indices_meta); |
12492 | } |
12493 | |
12494 | at::Tensor self_; |
12495 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12496 | at::functionalization::impl::sync(self); |
12497 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12498 | } else { |
12499 | self_ = self; |
12500 | } |
12501 | |
12502 | at::Tensor values_; |
12503 | if (at::functionalization::impl::isFunctionalTensor(values)) { |
12504 | at::functionalization::impl::sync(values); |
12505 | values_ = at::functionalization::impl::from_functional_tensor(values); |
12506 | } else { |
12507 | values_ = values; |
12508 | } |
12509 | |
12510 | at::Tensor indices_; |
12511 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
12512 | at::functionalization::impl::sync(indices); |
12513 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
12514 | } else { |
12515 | indices_ = indices; |
12516 | } |
12517 | if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) { |
12518 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
12519 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
12520 | TORCH_INTERNAL_ASSERT(false, |
12521 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12522 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12523 | } else { |
12524 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12525 | at::AutoDispatchSkipFunctionalize guard; |
12526 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::sort_values::call(self_, dim, descending, values_, indices_); |
12527 | return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices); |
12528 | } |
12529 | } else { |
12530 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
12531 | { |
12532 | at::AutoDispatchSkipFunctionalize guard; |
12533 | tmp_output = at::_ops::sort::call(self_, dim, descending); |
12534 | } |
12535 | at::functionalization::impl::replace_(values, std::get<0>(tmp_output)); |
12536 | at::functionalization::impl::commit_update(values); |
12537 | at::functionalization::impl::sync(values); |
12538 | at::functionalization::impl::replace_(indices, std::get<1>(tmp_output)); |
12539 | at::functionalization::impl::commit_update(indices); |
12540 | at::functionalization::impl::sync(indices); |
12541 | return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices); |
12542 | } |
12543 | } |
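
// Ops with several out arguments get the same treatment element-wise: the
// functional counterpart returns a std::tuple, each element is written back
// into its matching output (values, indices) with its own
// replace_()/commit_update()/sync() sequence, and the functional path is only
// taken when every output is a functional tensor.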
12544 | |
12545 | ::std::tuple<at::Tensor &,at::Tensor &> sort_out_values_stable(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending, at::Tensor & values, at::Tensor & indices) { |
12546 | if (false) { |
12547 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12548 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
12549 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
12550 | auto self_meta = to_meta(self); |
12551 | auto values_meta = to_meta(values); |
12552 | auto indices_meta = to_meta(indices); |
12553 | at::AutoDispatchSkipFunctionalize func_guard; |
12554 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12555 | at::_ops::sort_values_stable::call(self_meta, stable, dim, descending, values_meta, indices_meta); |
12556 | } |
12557 | |
12558 | at::Tensor self_; |
12559 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12560 | at::functionalization::impl::sync(self); |
12561 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12562 | } else { |
12563 | self_ = self; |
12564 | } |
12565 | |
12566 | at::Tensor values_; |
12567 | if (at::functionalization::impl::isFunctionalTensor(values)) { |
12568 | at::functionalization::impl::sync(values); |
12569 | values_ = at::functionalization::impl::from_functional_tensor(values); |
12570 | } else { |
12571 | values_ = values; |
12572 | } |
12573 | |
12574 | at::Tensor indices_; |
12575 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
12576 | at::functionalization::impl::sync(indices); |
12577 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
12578 | } else { |
12579 | indices_ = indices; |
12580 | } |
12581 | if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) { |
12582 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
12583 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
12584 | TORCH_INTERNAL_ASSERT(false, |
12585 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12586 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12587 | } else { |
12588 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12589 | at::AutoDispatchSkipFunctionalize guard; |
12590 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::sort_values_stable::call(self_, stable, dim, descending, values_, indices_); |
12591 | return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices); |
12592 | } |
12593 | } else { |
12594 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
12595 | { |
12596 | at::AutoDispatchSkipFunctionalize guard; |
12597 | tmp_output = at::_ops::sort_stable::call(self_, stable, dim, descending); |
12598 | } |
12599 | at::functionalization::impl::replace_(values, std::get<0>(tmp_output)); |
12600 | at::functionalization::impl::commit_update(values); |
12601 | at::functionalization::impl::sync(values); |
12602 | at::functionalization::impl::replace_(indices, std::get<1>(tmp_output)); |
12603 | at::functionalization::impl::commit_update(indices); |
12604 | at::functionalization::impl::sync(indices); |
12605 | return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices); |
12606 | } |
12607 | } |
12608 | |
12609 | ::std::tuple<at::Tensor &,at::Tensor &> sort_out_dimname_values(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool descending, at::Tensor & values, at::Tensor & indices) { |
12610 | if (false) { |
12611 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12612 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
12613 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
12614 | auto self_meta = to_meta(self); |
12615 | auto values_meta = to_meta(values); |
12616 | auto indices_meta = to_meta(indices); |
12617 | at::AutoDispatchSkipFunctionalize func_guard; |
12618 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12619 | at::_ops::sort_dimname_values::call(self_meta, dim, descending, values_meta, indices_meta); |
12620 | } |
12621 | |
12622 | at::Tensor self_; |
12623 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12624 | at::functionalization::impl::sync(self); |
12625 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12626 | } else { |
12627 | self_ = self; |
12628 | } |
12629 | |
12630 | at::Tensor values_; |
12631 | if (at::functionalization::impl::isFunctionalTensor(values)) { |
12632 | at::functionalization::impl::sync(values); |
12633 | values_ = at::functionalization::impl::from_functional_tensor(values); |
12634 | } else { |
12635 | values_ = values; |
12636 | } |
12637 | |
12638 | at::Tensor indices_; |
12639 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
12640 | at::functionalization::impl::sync(indices); |
12641 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
12642 | } else { |
12643 | indices_ = indices; |
12644 | } |
12645 | if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) { |
12646 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
12647 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
12648 | TORCH_INTERNAL_ASSERT(false, |
12649 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12650 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12651 | } else { |
12652 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12653 | at::AutoDispatchSkipFunctionalize guard; |
12654 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::sort_dimname_values::call(self_, dim, descending, values_, indices_); |
12655 | return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices); |
12656 | } |
12657 | } else { |
12658 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
12659 | { |
12660 | at::AutoDispatchSkipFunctionalize guard; |
12661 | tmp_output = at::_ops::sort_dimname::call(self_, dim, descending); |
12662 | } |
12663 | at::functionalization::impl::replace_(values, std::get<0>(tmp_output)); |
12664 | at::functionalization::impl::commit_update(values); |
12665 | at::functionalization::impl::sync(values); |
12666 | at::functionalization::impl::replace_(indices, std::get<1>(tmp_output)); |
12667 | at::functionalization::impl::commit_update(indices); |
12668 | at::functionalization::impl::sync(indices); |
12669 | return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices); |
12670 | } |
12671 | } |
12672 | |
12673 | ::std::tuple<at::Tensor &,at::Tensor &> sort_out_dimname_values_stable(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<bool> stable, at::Dimname dim, bool descending, at::Tensor & values, at::Tensor & indices) { |
12674 | if (false) { |
12675 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12676 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
12677 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
12678 | auto self_meta = to_meta(self); |
12679 | auto values_meta = to_meta(values); |
12680 | auto indices_meta = to_meta(indices); |
12681 | at::AutoDispatchSkipFunctionalize func_guard; |
12682 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12683 | at::_ops::sort_dimname_values_stable::call(self_meta, stable, dim, descending, values_meta, indices_meta); |
12684 | } |
12685 | |
12686 | at::Tensor self_; |
12687 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12688 | at::functionalization::impl::sync(self); |
12689 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12690 | } else { |
12691 | self_ = self; |
12692 | } |
12693 | |
12694 | at::Tensor values_; |
12695 | if (at::functionalization::impl::isFunctionalTensor(values)) { |
12696 | at::functionalization::impl::sync(values); |
12697 | values_ = at::functionalization::impl::from_functional_tensor(values); |
12698 | } else { |
12699 | values_ = values; |
12700 | } |
12701 | |
12702 | at::Tensor indices_; |
12703 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
12704 | at::functionalization::impl::sync(indices); |
12705 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
12706 | } else { |
12707 | indices_ = indices; |
12708 | } |
12709 | if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) { |
12710 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
12711 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
12712 | TORCH_INTERNAL_ASSERT(false, |
12713 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12714 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12715 | } else { |
12716 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12717 | at::AutoDispatchSkipFunctionalize guard; |
12718 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::sort_dimname_values_stable::call(self_, stable, dim, descending, values_, indices_); |
12719 | return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices); |
12720 | } |
12721 | } else { |
12722 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
12723 | { |
12724 | at::AutoDispatchSkipFunctionalize guard; |
12725 | tmp_output = at::_ops::sort_dimname_stable::call(self_, stable, dim, descending); |
12726 | } |
12727 | at::functionalization::impl::replace_(values, std::get<0>(tmp_output)); |
12728 | at::functionalization::impl::commit_update(values); |
12729 | at::functionalization::impl::sync(values); |
12730 | at::functionalization::impl::replace_(indices, std::get<1>(tmp_output)); |
12731 | at::functionalization::impl::commit_update(indices); |
12732 | at::functionalization::impl::sync(indices); |
12733 | return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices); |
12734 | } |
12735 | } |
12736 | |
12737 | at::Tensor & argsort_out_stable_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool stable, int64_t dim, bool descending, at::Tensor & out) { |
12738 | if (false) { |
12739 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12740 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
12741 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
12742 | auto self_meta = to_meta(self); |
12743 | auto out_meta = to_meta(out); |
12744 | at::AutoDispatchSkipFunctionalize func_guard; |
12745 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12746 | at::_ops::argsort_stable_out::call(self_meta, stable, dim, descending, out_meta); |
12747 | } |
12748 | |
12749 | at::Tensor self_; |
12750 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12751 | at::functionalization::impl::sync(self); |
12752 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12753 | } else { |
12754 | self_ = self; |
12755 | } |
12756 | |
12757 | at::Tensor out_; |
12758 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
12759 | at::functionalization::impl::sync(out); |
12760 | out_ = at::functionalization::impl::from_functional_tensor(out); |
12761 | } else { |
12762 | out_ = out; |
12763 | } |
12764 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
12765 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
12766 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
12767 | TORCH_INTERNAL_ASSERT(false, |
12768 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12769 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12770 | } else { |
12771 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12772 | at::AutoDispatchSkipFunctionalize guard; |
12773 | at::Tensor tmp_output = at::_ops::argsort_stable_out::call(self_, stable, dim, descending, out_); |
12774 | return out; |
12775 | } |
12776 | } else { |
12777 | at::Tensor tmp_output; |
12778 | { |
12779 | at::AutoDispatchSkipFunctionalize guard; |
12780 | tmp_output = at::_ops::argsort_stable::call(self_, stable, dim, descending); |
12781 | } |
12782 | at::functionalization::impl::replace_(out, tmp_output); |
12783 | at::functionalization::impl::commit_update(out); |
12784 | at::functionalization::impl::sync(out); |
12785 | return out; |
12786 | } |
12787 | } |
12788 | |
12789 | at::Tensor & all_out_all_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
12790 | if (false) { |
12791 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12792 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
12793 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
12794 | auto self_meta = to_meta(self); |
12795 | auto out_meta = to_meta(out); |
12796 | at::AutoDispatchSkipFunctionalize func_guard; |
12797 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12798 | at::_ops::all_all_out::call(self_meta, out_meta); |
12799 | } |
12800 | |
12801 | at::Tensor self_; |
12802 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12803 | at::functionalization::impl::sync(self); |
12804 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12805 | } else { |
12806 | self_ = self; |
12807 | } |
12808 | |
12809 | at::Tensor out_; |
12810 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
12811 | at::functionalization::impl::sync(out); |
12812 | out_ = at::functionalization::impl::from_functional_tensor(out); |
12813 | } else { |
12814 | out_ = out; |
12815 | } |
12816 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
12817 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
12818 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
12819 | TORCH_INTERNAL_ASSERT(false, |
12820 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12821 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12822 | } else { |
12823 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12824 | at::AutoDispatchSkipFunctionalize guard; |
12825 | at::Tensor tmp_output = at::_ops::all_all_out::call(self_, out_); |
12826 | return out; |
12827 | } |
12828 | } else { |
12829 | at::Tensor tmp_output; |
12830 | { |
12831 | at::AutoDispatchSkipFunctionalize guard; |
12832 | tmp_output = at::_ops::all::call(self_); |
12833 | } |
12834 | at::functionalization::impl::replace_(out, tmp_output); |
12835 | at::functionalization::impl::commit_update(out); |
12836 | at::functionalization::impl::sync(out); |
12837 | return out; |
12838 | } |
12839 | } |
12840 | |
12841 | at::Tensor & renorm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm, at::Tensor & out) { |
12842 | if (false) { |
12843 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12844 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
12845 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
12846 | auto self_meta = to_meta(self); |
12847 | auto out_meta = to_meta(out); |
12848 | at::AutoDispatchSkipFunctionalize func_guard; |
12849 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12850 | at::_ops::renorm_out::call(self_meta, p, dim, maxnorm, out_meta); |
12851 | } |
12852 | |
12853 | at::Tensor self_; |
12854 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12855 | at::functionalization::impl::sync(self); |
12856 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12857 | } else { |
12858 | self_ = self; |
12859 | } |
12860 | |
12861 | at::Tensor out_; |
12862 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
12863 | at::functionalization::impl::sync(out); |
12864 | out_ = at::functionalization::impl::from_functional_tensor(out); |
12865 | } else { |
12866 | out_ = out; |
12867 | } |
12868 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
12869 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
12870 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
12871 | TORCH_INTERNAL_ASSERT(false, |
12872 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12873 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12874 | } else { |
12875 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12876 | at::AutoDispatchSkipFunctionalize guard; |
12877 | at::Tensor tmp_output = at::_ops::renorm_out::call(self_, p, dim, maxnorm, out_); |
12878 | return out; |
12879 | } |
12880 | } else { |
12881 | at::Tensor tmp_output; |
12882 | { |
12883 | at::AutoDispatchSkipFunctionalize guard; |
12884 | tmp_output = at::_ops::renorm::call(self_, p, dim, maxnorm); |
12885 | } |
12886 | at::functionalization::impl::replace_(out, tmp_output); |
12887 | at::functionalization::impl::commit_update(out); |
12888 | at::functionalization::impl::sync(out); |
12889 | return out; |
12890 | } |
12891 | } |
12892 | |
12893 | at::Tensor & renorm_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) { |
12894 | if (true) { |
12895 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12896 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
12897 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
12898 | auto self_meta = to_meta(self); |
12899 | at::AutoDispatchSkipFunctionalize func_guard; |
12900 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12901 | at::_ops::renorm_::call(self_meta, p, dim, maxnorm); |
12902 | } |
12903 | |
12904 | at::Tensor self_; |
12905 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12906 | at::functionalization::impl::sync(self); |
12907 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12908 | } else { |
12909 | self_ = self; |
12910 | } |
12911 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
12912 | if ((false)) { |
12913 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
12914 | TORCH_INTERNAL_ASSERT(false, |
12915 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12916 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12917 | } else { |
12918 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12919 | at::AutoDispatchSkipFunctionalize guard; |
12920 | at::Tensor tmp_output = at::_ops::renorm_::call(self_, p, dim, maxnorm); |
12921 | return self; |
12922 | } |
12923 | } else { |
12924 | at::Tensor tmp_output; |
12925 | { |
12926 | at::AutoDispatchSkipFunctionalize guard; |
12927 | tmp_output = at::_ops::renorm::call(self_, p, dim, maxnorm); |
12928 | } |
12929 | at::functionalization::impl::replace_(self, tmp_output); |
12930 | at::functionalization::impl::commit_update(self); |
12931 | at::functionalization::impl::sync(self); |
12932 | return self; |
12933 | } |
12934 | } |
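
// The meta-tensor pre-check at the top of each kernel is guarded by a constant
// condition: `if (true)` for in-place variants such as renorm_ above, and
// `if (false)` for the corresponding out= overloads, so the shape check only
// runs for the in-place ops that are expected to support meta tensors today.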
12935 | |
12936 | at::Tensor & unfold_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor & out) { |
12937 | if (false) { |
12938 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12939 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
12940 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
12941 | auto grad_in_meta = to_meta(grad_in); |
12942 | auto out_meta = to_meta(out); |
12943 | at::AutoDispatchSkipFunctionalize func_guard; |
12944 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12945 | at::_ops::unfold_backward_out::call(grad_in_meta, input_sizes, dim, size, step, out_meta); |
12946 | } |
12947 | |
12948 | at::Tensor grad_in_; |
12949 | if (at::functionalization::impl::isFunctionalTensor(grad_in)) { |
12950 | at::functionalization::impl::sync(grad_in); |
12951 | grad_in_ = at::functionalization::impl::from_functional_tensor(grad_in); |
12952 | } else { |
12953 | grad_in_ = grad_in; |
12954 | } |
12955 | |
12956 | at::Tensor out_; |
12957 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
12958 | at::functionalization::impl::sync(out); |
12959 | out_ = at::functionalization::impl::from_functional_tensor(out); |
12960 | } else { |
12961 | out_ = out; |
12962 | } |
12963 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
12964 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_in))) { |
12965 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
12966 | TORCH_INTERNAL_ASSERT(false, |
12967 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12968 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12969 | } else { |
12970 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12971 | at::AutoDispatchSkipFunctionalize guard; |
12972 | at::Tensor tmp_output = at::_ops::unfold_backward_out::call(grad_in_, input_sizes, dim, size, step, out_); |
12973 | return out; |
12974 | } |
12975 | } else { |
12976 | at::Tensor tmp_output; |
12977 | { |
12978 | at::AutoDispatchSkipFunctionalize guard; |
12979 | tmp_output = at::_ops::unfold_backward::call(grad_in_, input_sizes, dim, size, step); |
12980 | } |
12981 | at::functionalization::impl::replace_(out, tmp_output); |
12982 | at::functionalization::impl::commit_update(out); |
12983 | at::functionalization::impl::sync(out); |
12984 | return out; |
12985 | } |
12986 | } |
12987 | |
12988 | at::Tensor & pow_out_Tensor_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out) { |
12989 | if (false) { |
12990 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12991 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
12992 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
12993 | auto self_meta = to_meta(self); |
12994 | auto exponent_meta = to_meta(exponent); |
12995 | auto out_meta = to_meta(out); |
12996 | at::AutoDispatchSkipFunctionalize func_guard; |
12997 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12998 | at::_ops::pow_Tensor_Tensor_out::call(self_meta, exponent_meta, out_meta); |
12999 | } |
13000 | |
13001 | at::Tensor self_; |
13002 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13003 | at::functionalization::impl::sync(self); |
13004 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13005 | } else { |
13006 | self_ = self; |
13007 | } |
13008 | |
13009 | at::Tensor exponent_; |
13010 | if (at::functionalization::impl::isFunctionalTensor(exponent)) { |
13011 | at::functionalization::impl::sync(exponent); |
13012 | exponent_ = at::functionalization::impl::from_functional_tensor(exponent); |
13013 | } else { |
13014 | exponent_ = exponent; |
13015 | } |
13016 | |
13017 | at::Tensor out_; |
13018 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13019 | at::functionalization::impl::sync(out); |
13020 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13021 | } else { |
13022 | out_ = out; |
13023 | } |
13024 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13025 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(exponent))) { |
13026 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
13027 | TORCH_INTERNAL_ASSERT(false, |
13028 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13029 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13030 | } else { |
13031 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13032 | at::AutoDispatchSkipFunctionalize guard; |
13033 | at::Tensor tmp_output = at::_ops::pow_Tensor_Tensor_out::call(self_, exponent_, out_); |
13034 | return out; |
13035 | } |
13036 | } else { |
13037 | at::Tensor tmp_output; |
13038 | { |
13039 | at::AutoDispatchSkipFunctionalize guard; |
13040 | tmp_output = at::_ops::pow_Tensor_Tensor::call(self_, exponent_); |
13041 | } |
13042 | at::functionalization::impl::replace_(out, tmp_output); |
13043 | at::functionalization::impl::commit_update(out); |
13044 | at::functionalization::impl::sync(out); |
13045 | return out; |
13046 | } |
13047 | } |
13048 | |
13049 | at::Tensor & pow__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & exponent) { |
13050 | if (true) { |
13051 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13052 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
13053 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
13054 | auto self_meta = to_meta(self); |
13055 | auto exponent_meta = to_meta(exponent); |
13056 | at::AutoDispatchSkipFunctionalize func_guard; |
13057 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13058 | at::_ops::pow__Tensor::call(self_meta, exponent_meta); |
13059 | } |
13060 | |
13061 | at::Tensor self_; |
13062 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13063 | at::functionalization::impl::sync(self); |
13064 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13065 | } else { |
13066 | self_ = self; |
13067 | } |
13068 | |
13069 | at::Tensor exponent_; |
13070 | if (at::functionalization::impl::isFunctionalTensor(exponent)) { |
13071 | at::functionalization::impl::sync(exponent); |
13072 | exponent_ = at::functionalization::impl::from_functional_tensor(exponent); |
13073 | } else { |
13074 | exponent_ = exponent; |
13075 | } |
13076 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
13077 | if ((false || at::functionalization::impl::isFunctionalTensor(exponent))) { |
13078 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
13079 | TORCH_INTERNAL_ASSERT(false, |
13080 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13081 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13082 | } else { |
13083 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13084 | at::AutoDispatchSkipFunctionalize guard; |
13085 | at::Tensor tmp_output = at::_ops::pow__Tensor::call(self_, exponent_); |
13086 | return self; |
13087 | } |
13088 | } else { |
13089 | at::Tensor tmp_output; |
13090 | { |
13091 | at::AutoDispatchSkipFunctionalize guard; |
13092 | tmp_output = at::_ops::pow_Tensor_Tensor::call(self_, exponent_); |
13093 | } |
13094 | at::functionalization::impl::replace_(self, tmp_output); |
13095 | at::functionalization::impl::commit_update(self); |
13096 | at::functionalization::impl::sync(self); |
13097 | return self; |
13098 | } |
13099 | } |
13100 | |
13101 | at::Tensor & pow_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out) { |
13102 | if (false) { |
13103 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13104 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
13105 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
13106 | auto exponent_meta = to_meta(exponent); |
13107 | auto out_meta = to_meta(out); |
13108 | at::AutoDispatchSkipFunctionalize func_guard; |
13109 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13110 | at::_ops::pow_Scalar_out::call(self, exponent_meta, out_meta); |
13111 | } |
13112 | |
13113 | at::Tensor exponent_; |
13114 | if (at::functionalization::impl::isFunctionalTensor(exponent)) { |
13115 | at::functionalization::impl::sync(exponent); |
13116 | exponent_ = at::functionalization::impl::from_functional_tensor(exponent); |
13117 | } else { |
13118 | exponent_ = exponent; |
13119 | } |
13120 | |
13121 | at::Tensor out_; |
13122 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13123 | at::functionalization::impl::sync(out); |
13124 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13125 | } else { |
13126 | out_ = out; |
13127 | } |
13128 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13129 | if ((false || at::functionalization::impl::isFunctionalTensor(exponent))) { |
13130 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
13131 | TORCH_INTERNAL_ASSERT(false, |
13132 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13133 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13134 | } else { |
13135 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13136 | at::AutoDispatchSkipFunctionalize guard; |
13137 | at::Tensor tmp_output = at::_ops::pow_Scalar_out::call(self, exponent_, out_); |
13138 | return out; |
13139 | } |
13140 | } else { |
13141 | at::Tensor tmp_output; |
13142 | { |
13143 | at::AutoDispatchSkipFunctionalize guard; |
13144 | tmp_output = at::_ops::pow_Scalar::call(self, exponent_); |
13145 | } |
13146 | at::functionalization::impl::replace_(out, tmp_output); |
13147 | at::functionalization::impl::commit_update(out); |
13148 | at::functionalization::impl::sync(out); |
13149 | return out; |
13150 | } |
13151 | } |
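
// When the op takes a Scalar `self` (as in pow.Scalar_out), there is nothing
// to unwrap for it: only the Tensor arguments (`exponent` and `out`) are
// synced and unwrapped, and the Scalar is passed through unchanged to both the
// redispatch and the functional pow.Scalar call.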
13152 | |
13153 | at::Tensor & pow_out_Tensor_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) { |
13154 | if (false) { |
13155 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13156 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
13157 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
13158 | auto self_meta = to_meta(self); |
13159 | auto out_meta = to_meta(out); |
13160 | at::AutoDispatchSkipFunctionalize func_guard; |
13161 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13162 | at::_ops::pow_Tensor_Scalar_out::call(self_meta, exponent, out_meta); |
13163 | } |
13164 | |
13165 | at::Tensor self_; |
13166 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13167 | at::functionalization::impl::sync(self); |
13168 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13169 | } else { |
13170 | self_ = self; |
13171 | } |
13172 | |
13173 | at::Tensor out_; |
13174 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13175 | at::functionalization::impl::sync(out); |
13176 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13177 | } else { |
13178 | out_ = out; |
13179 | } |
13180 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13181 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
13182 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
13183 | TORCH_INTERNAL_ASSERT(false, |
13184 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13185 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13186 | } else { |
13187 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13188 | at::AutoDispatchSkipFunctionalize guard; |
13189 | at::Tensor tmp_output = at::_ops::pow_Tensor_Scalar_out::call(self_, exponent, out_); |
13190 | return out; |
13191 | } |
13192 | } else { |
13193 | at::Tensor tmp_output; |
13194 | { |
13195 | at::AutoDispatchSkipFunctionalize guard; |
13196 | tmp_output = at::_ops::pow_Tensor_Scalar::call(self_, exponent); |
13197 | } |
13198 | at::functionalization::impl::replace_(out, tmp_output); |
13199 | at::functionalization::impl::commit_update(out); |
13200 | at::functionalization::impl::sync(out); |
13201 | return out; |
13202 | } |
13203 | } |
13204 | |
13205 | at::Tensor & pow__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & exponent) { |
13206 | if (true) { |
13207 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13208 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
13209 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
13210 | auto self_meta = to_meta(self); |
13211 | at::AutoDispatchSkipFunctionalize func_guard; |
13212 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13213 | at::_ops::pow__Scalar::call(self_meta, exponent); |
13214 | } |
13215 | |
13216 | at::Tensor self_; |
13217 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13218 | at::functionalization::impl::sync(self); |
13219 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13220 | } else { |
13221 | self_ = self; |
13222 | } |
13223 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
13224 | if ((false)) { |
13225 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
13226 | TORCH_INTERNAL_ASSERT(false, |
13227 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13228 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13229 | } else { |
13230 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13231 | at::AutoDispatchSkipFunctionalize guard; |
13232 | at::Tensor tmp_output = at::_ops::pow__Scalar::call(self_, exponent); |
13233 | return self; |
13234 | } |
13235 | } else { |
13236 | at::Tensor tmp_output; |
13237 | { |
13238 | at::AutoDispatchSkipFunctionalize guard; |
13239 | tmp_output = at::_ops::pow_Tensor_Scalar::call(self_, exponent); |
13240 | } |
13241 | at::functionalization::impl::replace_(self, tmp_output); |
13242 | at::functionalization::impl::commit_update(self); |
13243 | at::functionalization::impl::sync(self); |
13244 | return self; |
13245 | } |
13246 | } |
13247 | |
13248 | at::Tensor & float_power_out_Tensor_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out) { |
13249 | if (false) { |
13250 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13251 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
13252 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
13253 | auto self_meta = to_meta(self); |
13254 | auto exponent_meta = to_meta(exponent); |
13255 | auto out_meta = to_meta(out); |
13256 | at::AutoDispatchSkipFunctionalize func_guard; |
13257 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13258 | at::_ops::float_power_Tensor_Tensor_out::call(self_meta, exponent_meta, out_meta); |
13259 | } |
13260 | |
13261 | at::Tensor self_; |
13262 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13263 | at::functionalization::impl::sync(self); |
13264 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13265 | } else { |
13266 | self_ = self; |
13267 | } |
13268 | |
13269 | at::Tensor exponent_; |
13270 | if (at::functionalization::impl::isFunctionalTensor(exponent)) { |
13271 | at::functionalization::impl::sync(exponent); |
13272 | exponent_ = at::functionalization::impl::from_functional_tensor(exponent); |
13273 | } else { |
13274 | exponent_ = exponent; |
13275 | } |
13276 | |
13277 | at::Tensor out_; |
13278 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13279 | at::functionalization::impl::sync(out); |
13280 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13281 | } else { |
13282 | out_ = out; |
13283 | } |
13284 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13285 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(exponent))) { |
13286 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
13287 | TORCH_INTERNAL_ASSERT(false, |
13288 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13289 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13290 | } else { |
13291 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13292 | at::AutoDispatchSkipFunctionalize guard; |
13293 | at::Tensor tmp_output = at::_ops::float_power_Tensor_Tensor_out::call(self_, exponent_, out_); |
13294 | return out; |
13295 | } |
13296 | } else { |
13297 | at::Tensor tmp_output; |
13298 | { |
13299 | at::AutoDispatchSkipFunctionalize guard; |
13300 | tmp_output = at::_ops::float_power_Tensor_Tensor::call(self_, exponent_); |
13301 | } |
13302 | at::functionalization::impl::replace_(out, tmp_output); |
13303 | at::functionalization::impl::commit_update(out); |
13304 | at::functionalization::impl::sync(out); |
13305 | return out; |
13306 | } |
13307 | } |
13308 | |
13309 | at::Tensor & float_power__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & exponent) { |
13310 | if (true) { |
13311 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13312 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
13313 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
13314 | auto self_meta = to_meta(self); |
13315 | auto exponent_meta = to_meta(exponent); |
13316 | at::AutoDispatchSkipFunctionalize func_guard; |
13317 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13318 | at::_ops::float_power__Tensor::call(self_meta, exponent_meta); |
13319 | } |
13320 | |
13321 | at::Tensor self_; |
13322 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13323 | at::functionalization::impl::sync(self); |
13324 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13325 | } else { |
13326 | self_ = self; |
13327 | } |
13328 | |
13329 | at::Tensor exponent_; |
13330 | if (at::functionalization::impl::isFunctionalTensor(exponent)) { |
13331 | at::functionalization::impl::sync(exponent); |
13332 | exponent_ = at::functionalization::impl::from_functional_tensor(exponent); |
13333 | } else { |
13334 | exponent_ = exponent; |
13335 | } |
13336 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
13337 | if ((false || at::functionalization::impl::isFunctionalTensor(exponent))) { |
13338 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
13339 | TORCH_INTERNAL_ASSERT(false, |
13340 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13341 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13342 | } else { |
13343 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13344 | at::AutoDispatchSkipFunctionalize guard; |
13345 | at::Tensor tmp_output = at::_ops::float_power__Tensor::call(self_, exponent_); |
13346 |       return self; |
13347 | } |
13348 | } else { |
13349 | at::Tensor tmp_output; |
13350 | { |
13351 | at::AutoDispatchSkipFunctionalize guard; |
13352 | tmp_output = at::_ops::float_power_Tensor_Tensor::call(self_, exponent_); |
13353 | } |
13354 | at::functionalization::impl::replace_(self, tmp_output); |
13355 | at::functionalization::impl::commit_update(self); |
13356 | at::functionalization::impl::sync(self); |
13357 | return self; |
13358 | } |
13359 | } |
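// Note: for in-place ops such as float_power_ the meta-tensor pre-flight above is compiled in
// (the `if (true)` guard): the arguments are first pushed through the op on meta tensors so
// that shape/dtype errors still surface, even though the mutation itself is re-expressed as
//   tmp = at::_ops::float_power_Tensor_Tensor::call(self_, exponent_);
// followed by replace_/commit_update/sync on `self`, mirroring the out= kernel above.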
13360 | |
13361 | at::Tensor & float_power_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out) { |
13362 | if (false) { |
13363 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13364 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
13365 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
13366 | auto exponent_meta = to_meta(exponent); |
13367 | auto out_meta = to_meta(out); |
13368 | at::AutoDispatchSkipFunctionalize func_guard; |
13369 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13370 | at::_ops::float_power_Scalar_out::call(self, exponent_meta, out_meta); |
13371 | } |
13372 | |
13373 | at::Tensor exponent_; |
13374 | if (at::functionalization::impl::isFunctionalTensor(exponent)) { |
13375 | at::functionalization::impl::sync(exponent); |
13376 | exponent_ = at::functionalization::impl::from_functional_tensor(exponent); |
13377 | } else { |
13378 | exponent_ = exponent; |
13379 | } |
13380 | |
13381 | at::Tensor out_; |
13382 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13383 | at::functionalization::impl::sync(out); |
13384 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13385 | } else { |
13386 | out_ = out; |
13387 | } |
13388 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13389 | if ((false || at::functionalization::impl::isFunctionalTensor(exponent))) { |
13390 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
13391 | TORCH_INTERNAL_ASSERT(false, |
13392 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13393 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13394 | } else { |
13395 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13396 | at::AutoDispatchSkipFunctionalize guard; |
13397 | at::Tensor tmp_output = at::_ops::float_power_Scalar_out::call(self, exponent_, out_); |
13398 |       return out; |
13399 | } |
13400 | } else { |
13401 | at::Tensor tmp_output; |
13402 | { |
13403 | at::AutoDispatchSkipFunctionalize guard; |
13404 | tmp_output = at::_ops::float_power_Scalar::call(self, exponent_); |
13405 | } |
13406 | at::functionalization::impl::replace_(out, tmp_output); |
13407 | at::functionalization::impl::commit_update(out); |
13408 | at::functionalization::impl::sync(out); |
13409 | return out; |
13410 | } |
13411 | } |
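// Note: the meta-tensor pre-flight is compiled out for out= kernels like the one above
// (the `if (false)` guard); per the generated comments it is only enabled for in-place ops,
// which are assumed to all support meta tensors.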
13412 | |
13413 | at::Tensor & float_power_out_Tensor_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) { |
13414 | if (false) { |
13415 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13416 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
13417 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
13418 | auto self_meta = to_meta(self); |
13419 | auto out_meta = to_meta(out); |
13420 | at::AutoDispatchSkipFunctionalize func_guard; |
13421 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13422 | at::_ops::float_power_Tensor_Scalar_out::call(self_meta, exponent, out_meta); |
13423 | } |
13424 | |
13425 | at::Tensor self_; |
13426 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13427 | at::functionalization::impl::sync(self); |
13428 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13429 | } else { |
13430 | self_ = self; |
13431 | } |
13432 | |
13433 | at::Tensor out_; |
13434 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13435 | at::functionalization::impl::sync(out); |
13436 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13437 | } else { |
13438 | out_ = out; |
13439 | } |
13440 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13441 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
13442 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
13443 | TORCH_INTERNAL_ASSERT(false, |
13444 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13445 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13446 | } else { |
13447 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13448 | at::AutoDispatchSkipFunctionalize guard; |
13449 | at::Tensor tmp_output = at::_ops::float_power_Tensor_Scalar_out::call(self_, exponent, out_); |
13450 |       return out; |
13451 | } |
13452 | } else { |
13453 | at::Tensor tmp_output; |
13454 | { |
13455 | at::AutoDispatchSkipFunctionalize guard; |
13456 | tmp_output = at::_ops::float_power_Tensor_Scalar::call(self_, exponent); |
13457 | } |
13458 | at::functionalization::impl::replace_(out, tmp_output); |
13459 | at::functionalization::impl::commit_update(out); |
13460 | at::functionalization::impl::sync(out); |
13461 | return out; |
13462 | } |
13463 | } |
13464 | |
13465 | at::Tensor & float_power__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & exponent) { |
13466 | if (true) { |
13467 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13468 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
13469 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
13470 | auto self_meta = to_meta(self); |
13471 | at::AutoDispatchSkipFunctionalize func_guard; |
13472 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13473 | at::_ops::float_power__Scalar::call(self_meta, exponent); |
13474 | } |
13475 | |
13476 | at::Tensor self_; |
13477 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13478 | at::functionalization::impl::sync(self); |
13479 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13480 | } else { |
13481 | self_ = self; |
13482 | } |
13483 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
13484 | if ((false)) { |
13485 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
13486 | TORCH_INTERNAL_ASSERT(false, |
13487 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13488 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13489 | } else { |
13490 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13491 | at::AutoDispatchSkipFunctionalize guard; |
13492 | at::Tensor tmp_output = at::_ops::float_power__Scalar::call(self_, exponent); |
13493 |       return self; |
13494 | } |
13495 | } else { |
13496 | at::Tensor tmp_output; |
13497 | { |
13498 | at::AutoDispatchSkipFunctionalize guard; |
13499 | tmp_output = at::_ops::float_power_Tensor_Scalar::call(self_, exponent); |
13500 | } |
13501 | at::functionalization::impl::replace_(self, tmp_output); |
13502 | at::functionalization::impl::commit_update(self); |
13503 | at::functionalization::impl::sync(self); |
13504 | return self; |
13505 | } |
13506 | } |
13507 | |
13508 | at::Tensor & normal_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator, at::Tensor & out) { |
13509 | if (false) { |
13510 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13511 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
13512 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
13513 | auto self_meta = to_meta(self); |
13514 | auto out_meta = to_meta(out); |
13515 | at::AutoDispatchSkipFunctionalize func_guard; |
13516 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13517 | at::_ops::normal_out::call(self_meta, mean, std, generator, out_meta); |
13518 | } |
13519 | |
13520 | at::Tensor self_; |
13521 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13522 | at::functionalization::impl::sync(self); |
13523 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13524 | } else { |
13525 | self_ = self; |
13526 | } |
13527 | |
13528 | at::Tensor out_; |
13529 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13530 | at::functionalization::impl::sync(out); |
13531 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13532 | } else { |
13533 | out_ = out; |
13534 | } |
13535 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13536 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
13537 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
13538 | TORCH_INTERNAL_ASSERT(false, |
13539 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13540 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13541 | } else { |
13542 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13543 | at::AutoDispatchSkipFunctionalize guard; |
13544 | at::Tensor tmp_output = at::_ops::normal_out::call(self_, mean, std, generator, out_); |
13545 |       return out; |
13546 | } |
13547 | } else { |
13548 | at::Tensor tmp_output; |
13549 | { |
13550 | at::AutoDispatchSkipFunctionalize guard; |
13551 | tmp_output = at::_ops::normal_functional::call(self_, mean, std, generator); |
13552 | } |
13553 | at::functionalization::impl::replace_(out, tmp_output); |
13554 | at::functionalization::impl::commit_update(out); |
13555 | at::functionalization::impl::sync(out); |
13556 | return out; |
13557 | } |
13558 | } |
13559 | |
13560 | at::Tensor & normal_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) { |
13561 | if (true) { |
13562 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13563 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
13564 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
13565 | auto self_meta = to_meta(self); |
13566 | at::AutoDispatchSkipFunctionalize func_guard; |
13567 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13568 | at::_ops::normal_::call(self_meta, mean, std, generator); |
13569 | } |
13570 | |
13571 | at::Tensor self_; |
13572 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13573 | at::functionalization::impl::sync(self); |
13574 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13575 | } else { |
13576 | self_ = self; |
13577 | } |
13578 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
13579 | if ((false)) { |
13580 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
13581 | TORCH_INTERNAL_ASSERT(false, |
13582 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13583 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13584 | } else { |
13585 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13586 | at::AutoDispatchSkipFunctionalize guard; |
13587 | at::Tensor tmp_output = at::_ops::normal_::call(self_, mean, std, generator); |
13588 |       return self; |
13589 | } |
13590 | } else { |
13591 | at::Tensor tmp_output; |
13592 | { |
13593 | at::AutoDispatchSkipFunctionalize guard; |
13594 | tmp_output = at::_ops::normal_functional::call(self_, mean, std, generator); |
13595 | } |
13596 | at::functionalization::impl::replace_(self, tmp_output); |
13597 | at::functionalization::impl::commit_update(self); |
13598 | at::functionalization::impl::sync(self); |
13599 | return self; |
13600 | } |
13601 | } |
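// Note: in-place random ops have no natural out-of-place counterpart in the public API, so
// functionalization routes them to a dedicated functional op: a call like
// `self.normal_(mean, std, generator)` is re-expressed above as
//   tmp = at::_ops::normal_functional::call(self_, mean, std, generator);
// with the result folded back into `self` via replace_/commit_update/sync.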
13602 | |
13603 | at::Tensor & normal_out_Tensor_float_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mean, double std, c10::optional<at::Generator> generator, at::Tensor & out) { |
13604 | if (false) { |
13605 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13606 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
13607 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
13608 | auto mean_meta = to_meta(mean); |
13609 | auto out_meta = to_meta(out); |
13610 | at::AutoDispatchSkipFunctionalize func_guard; |
13611 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13612 | at::_ops::normal_Tensor_float_out::call(mean_meta, std, generator, out_meta); |
13613 | } |
13614 | |
13615 | at::Tensor mean_; |
13616 | if (at::functionalization::impl::isFunctionalTensor(mean)) { |
13617 | at::functionalization::impl::sync(mean); |
13618 | mean_ = at::functionalization::impl::from_functional_tensor(mean); |
13619 | } else { |
13620 | mean_ = mean; |
13621 | } |
13622 | |
13623 | at::Tensor out_; |
13624 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13625 | at::functionalization::impl::sync(out); |
13626 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13627 | } else { |
13628 | out_ = out; |
13629 | } |
13630 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13631 | if ((false || at::functionalization::impl::isFunctionalTensor(mean))) { |
13632 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
13633 | TORCH_INTERNAL_ASSERT(false, |
13634 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13635 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13636 | } else { |
13637 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13638 | at::AutoDispatchSkipFunctionalize guard; |
13639 | at::Tensor tmp_output = at::_ops::normal_Tensor_float_out::call(mean_, std, generator, out_); |
13640 |       return out; |
13641 | } |
13642 | } else { |
13643 | at::Tensor tmp_output; |
13644 | { |
13645 | at::AutoDispatchSkipFunctionalize guard; |
13646 | tmp_output = at::_ops::normal_Tensor_float::call(mean_, std, generator); |
13647 | } |
13648 | at::functionalization::impl::replace_(out, tmp_output); |
13649 | at::functionalization::impl::commit_update(out); |
13650 | at::functionalization::impl::sync(out); |
13651 | return out; |
13652 | } |
13653 | } |
13654 | |
13655 | at::Tensor & normal_out_float_Tensor_out(c10::DispatchKeySet dispatchKeySet, double mean, const at::Tensor & std, c10::optional<at::Generator> generator, at::Tensor & out) { |
13656 | if (false) { |
13657 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13658 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
13659 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
13660 | auto std_meta = to_meta(std); |
13661 | auto out_meta = to_meta(out); |
13662 | at::AutoDispatchSkipFunctionalize func_guard; |
13663 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13664 | at::_ops::normal_float_Tensor_out::call(mean, std_meta, generator, out_meta); |
13665 | } |
13666 | |
13667 | at::Tensor std_; |
13668 | if (at::functionalization::impl::isFunctionalTensor(std)) { |
13669 | at::functionalization::impl::sync(std); |
13670 | std_ = at::functionalization::impl::from_functional_tensor(std); |
13671 | } else { |
13672 | std_ = std; |
13673 | } |
13674 | |
13675 | at::Tensor out_; |
13676 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13677 | at::functionalization::impl::sync(out); |
13678 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13679 | } else { |
13680 | out_ = out; |
13681 | } |
13682 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13683 | if ((false || at::functionalization::impl::isFunctionalTensor(std))) { |
13684 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
13685 | TORCH_INTERNAL_ASSERT(false, |
13686 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13687 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13688 | } else { |
13689 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13690 | at::AutoDispatchSkipFunctionalize guard; |
13691 | at::Tensor tmp_output = at::_ops::normal_float_Tensor_out::call(mean, std_, generator, out_); |
13692 |       return out; |
13693 | } |
13694 | } else { |
13695 | at::Tensor tmp_output; |
13696 | { |
13697 | at::AutoDispatchSkipFunctionalize guard; |
13698 | tmp_output = at::_ops::normal_float_Tensor::call(mean, std_, generator); |
13699 | } |
13700 | at::functionalization::impl::replace_(out, tmp_output); |
13701 | at::functionalization::impl::commit_update(out); |
13702 | at::functionalization::impl::sync(out); |
13703 | return out; |
13704 | } |
13705 | } |
13706 | |
13707 | at::Tensor & normal_out_Tensor_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mean, const at::Tensor & std, c10::optional<at::Generator> generator, at::Tensor & out) { |
13708 | if (false) { |
13709 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13710 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
13711 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
13712 | auto mean_meta = to_meta(mean); |
13713 | auto std_meta = to_meta(std); |
13714 | auto out_meta = to_meta(out); |
13715 | at::AutoDispatchSkipFunctionalize func_guard; |
13716 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13717 | at::_ops::normal_Tensor_Tensor_out::call(mean_meta, std_meta, generator, out_meta); |
13718 | } |
13719 | |
13720 | at::Tensor mean_; |
13721 | if (at::functionalization::impl::isFunctionalTensor(mean)) { |
13722 | at::functionalization::impl::sync(mean); |
13723 | mean_ = at::functionalization::impl::from_functional_tensor(mean); |
13724 | } else { |
13725 | mean_ = mean; |
13726 | } |
13727 | |
13728 | at::Tensor std_; |
13729 | if (at::functionalization::impl::isFunctionalTensor(std)) { |
13730 | at::functionalization::impl::sync(std); |
13731 | std_ = at::functionalization::impl::from_functional_tensor(std); |
13732 | } else { |
13733 | std_ = std; |
13734 | } |
13735 | |
13736 | at::Tensor out_; |
13737 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13738 | at::functionalization::impl::sync(out); |
13739 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13740 | } else { |
13741 | out_ = out; |
13742 | } |
13743 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13744 | if ((false || at::functionalization::impl::isFunctionalTensor(mean) || at::functionalization::impl::isFunctionalTensor(std))) { |
13745 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
13746 | TORCH_INTERNAL_ASSERT(false, |
13747 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13748 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13749 | } else { |
13750 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13751 | at::AutoDispatchSkipFunctionalize guard; |
13752 | at::Tensor tmp_output = at::_ops::normal_Tensor_Tensor_out::call(mean_, std_, generator, out_); |
13753 |       return out; |
13754 | } |
13755 | } else { |
13756 | at::Tensor tmp_output; |
13757 | { |
13758 | at::AutoDispatchSkipFunctionalize guard; |
13759 | tmp_output = at::_ops::normal_Tensor_Tensor::call(mean_, std_, generator); |
13760 | } |
13761 | at::functionalization::impl::replace_(out, tmp_output); |
13762 | at::functionalization::impl::commit_update(out); |
13763 | at::functionalization::impl::sync(out); |
13764 | return out; |
13765 | } |
13766 | } |
13767 | |
13768 | at::Tensor & normal_out_float_float_out(c10::DispatchKeySet dispatchKeySet, double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) { |
13769 | if (false) { |
13770 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13771 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
13772 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
13773 | auto out_meta = to_meta(out); |
13774 | at::AutoDispatchSkipFunctionalize func_guard; |
13775 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13776 | at::_ops::normal_float_float_out::call(mean, std, size, generator, out_meta); |
13777 | } |
13778 | |
13779 | at::Tensor out_; |
13780 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13781 | at::functionalization::impl::sync(out); |
13782 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13783 | } else { |
13784 | out_ = out; |
13785 | } |
13786 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13787 | if ((false)) { |
13788 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
13789 | TORCH_INTERNAL_ASSERT(false, |
13790 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13791 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13792 | } else { |
13793 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13794 | at::AutoDispatchSkipFunctionalize guard; |
13795 | at::Tensor tmp_output = at::_ops::normal_float_float_out::call(mean, std, size, generator, out_); |
13796 |       return out; |
13797 | } |
13798 | } else { |
13799 | at::Tensor tmp_output; |
13800 | { |
13801 | at::AutoDispatchSkipFunctionalize guard; |
13802 | tmp_output = at::_ops::normal_float_float::call(mean, std, size, generator, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
13803 | } |
13804 | at::functionalization::impl::replace_(out, tmp_output); |
13805 | at::functionalization::impl::commit_update(out); |
13806 | at::functionalization::impl::sync(out); |
13807 | return out; |
13808 | } |
13809 | } |
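// Note: normal.float_float_out is factory-like (it has no tensor inputs), so its functional
// form needs dtype/layout/device to construct the result; above, those are read off the
// existing `out` tensor (out_.scalar_type(), out_.layout(), out_.device()) when calling
// at::_ops::normal_float_float.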
13810 | |
13811 | at::Tensor & _amp_update_scale_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval, at::Tensor & out) { |
13812 | if (false) { |
13813 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13814 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
13815 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
13816 | auto self_meta = to_meta(self); |
13817 | auto growth_tracker_meta = to_meta(growth_tracker); |
13818 | auto found_inf_meta = to_meta(found_inf); |
13819 | auto out_meta = to_meta(out); |
13820 | at::AutoDispatchSkipFunctionalize func_guard; |
13821 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13822 | at::_ops::_amp_update_scale_out::call(self_meta, growth_tracker_meta, found_inf_meta, scale_growth_factor, scale_backoff_factor, growth_interval, out_meta); |
13823 | } |
13824 | |
13825 | at::Tensor self_; |
13826 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13827 | at::functionalization::impl::sync(self); |
13828 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13829 | } else { |
13830 | self_ = self; |
13831 | } |
13832 | |
13833 | at::Tensor growth_tracker_; |
13834 | if (at::functionalization::impl::isFunctionalTensor(growth_tracker)) { |
13835 | at::functionalization::impl::sync(growth_tracker); |
13836 | growth_tracker_ = at::functionalization::impl::from_functional_tensor(growth_tracker); |
13837 | } else { |
13838 | growth_tracker_ = growth_tracker; |
13839 | } |
13840 | |
13841 | at::Tensor found_inf_; |
13842 | if (at::functionalization::impl::isFunctionalTensor(found_inf)) { |
13843 | at::functionalization::impl::sync(found_inf); |
13844 | found_inf_ = at::functionalization::impl::from_functional_tensor(found_inf); |
13845 | } else { |
13846 | found_inf_ = found_inf; |
13847 | } |
13848 | |
13849 | at::Tensor out_; |
13850 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13851 | at::functionalization::impl::sync(out); |
13852 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13853 | } else { |
13854 | out_ = out; |
13855 | } |
13856 | if (!(true && at::functionalization::impl::isFunctionalTensor(growth_tracker) && at::functionalization::impl::isFunctionalTensor(out))) { |
13857 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(found_inf))) { |
13858 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
13859 | TORCH_INTERNAL_ASSERT(false, |
13860 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13861 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13862 | } else { |
13863 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13864 | at::AutoDispatchSkipFunctionalize guard; |
13865 | at::Tensor tmp_output = at::_ops::_amp_update_scale_out::call(self_, growth_tracker_, found_inf_, scale_growth_factor, scale_backoff_factor, growth_interval, out_); |
13866 |       return out; |
13867 | } |
13868 | } else { |
13869 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
13870 | { |
13871 | at::AutoDispatchSkipFunctionalize guard; |
13872 | tmp_output = at::_ops::_amp_update_scale::call(self_, growth_tracker_, found_inf_, scale_growth_factor, scale_backoff_factor, growth_interval); |
13873 | } |
13874 | at::functionalization::impl::replace_(growth_tracker, std::get<0>(tmp_output)); |
13875 | at::functionalization::impl::commit_update(growth_tracker); |
13876 | at::functionalization::impl::sync(growth_tracker); |
13877 | at::functionalization::impl::replace_(out, std::get<1>(tmp_output)); |
13878 | at::functionalization::impl::commit_update(out); |
13879 | at::functionalization::impl::sync(out); |
13880 | return out; |
13881 | } |
13882 | } |
13883 | |
13884 | at::Tensor & _amp_update_scale_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) { |
13885 | if (true) { |
13886 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13887 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
13888 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
13889 | auto self_meta = to_meta(self); |
13890 | auto growth_tracker_meta = to_meta(growth_tracker); |
13891 | auto found_inf_meta = to_meta(found_inf); |
13892 | at::AutoDispatchSkipFunctionalize func_guard; |
13893 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13894 | at::_ops::_amp_update_scale_::call(self_meta, growth_tracker_meta, found_inf_meta, scale_growth_factor, scale_backoff_factor, growth_interval); |
13895 | } |
13896 | |
13897 | at::Tensor self_; |
13898 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13899 | at::functionalization::impl::sync(self); |
13900 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13901 | } else { |
13902 | self_ = self; |
13903 | } |
13904 | |
13905 | at::Tensor growth_tracker_; |
13906 | if (at::functionalization::impl::isFunctionalTensor(growth_tracker)) { |
13907 | at::functionalization::impl::sync(growth_tracker); |
13908 | growth_tracker_ = at::functionalization::impl::from_functional_tensor(growth_tracker); |
13909 | } else { |
13910 | growth_tracker_ = growth_tracker; |
13911 | } |
13912 | |
13913 | at::Tensor found_inf_; |
13914 | if (at::functionalization::impl::isFunctionalTensor(found_inf)) { |
13915 | at::functionalization::impl::sync(found_inf); |
13916 | found_inf_ = at::functionalization::impl::from_functional_tensor(found_inf); |
13917 | } else { |
13918 | found_inf_ = found_inf; |
13919 | } |
13920 | if (!(true && at::functionalization::impl::isFunctionalTensor(self) && at::functionalization::impl::isFunctionalTensor(growth_tracker))) { |
13921 | if ((false || at::functionalization::impl::isFunctionalTensor(found_inf))) { |
13922 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
13923 | TORCH_INTERNAL_ASSERT(false, |
13924 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13925 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13926 | } else { |
13927 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13928 | at::AutoDispatchSkipFunctionalize guard; |
13929 | at::Tensor tmp_output = at::_ops::_amp_update_scale_::call(self_, growth_tracker_, found_inf_, scale_growth_factor, scale_backoff_factor, growth_interval); |
13930 |       return self; |
13931 | } |
13932 | } else { |
13933 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
13934 | { |
13935 | at::AutoDispatchSkipFunctionalize guard; |
13936 | tmp_output = at::_ops::_amp_update_scale::call(self_, growth_tracker_, found_inf_, scale_growth_factor, scale_backoff_factor, growth_interval); |
13937 | } |
13938 | at::functionalization::impl::replace_(self, std::get<0>(tmp_output)); |
13939 | at::functionalization::impl::commit_update(self); |
13940 | at::functionalization::impl::sync(self); |
13941 | at::functionalization::impl::replace_(growth_tracker, std::get<1>(tmp_output)); |
13942 | at::functionalization::impl::commit_update(growth_tracker); |
13943 | at::functionalization::impl::sync(growth_tracker); |
13944 | return self; |
13945 | } |
13946 | } |
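// Note: _amp_update_scale_ mutates two arguments (`self` and `growth_tracker`), so the
// functional counterpart returns a ::std::tuple; std::get<0> is folded back into `self`,
// std::get<1> into `growth_tracker`, and only `self` is returned to preserve the original
// in-place signature.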
13947 | |
13948 | void _foreach_atan_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { |
13949 | if (false) { |
13950 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13951 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
13952 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
13953 | auto self_meta = to_meta(self); |
13954 | auto out_meta = to_meta(out); |
13955 | at::AutoDispatchSkipFunctionalize func_guard; |
13956 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13957 | at::_ops::_foreach_atan_out::call(self_meta, out_meta); |
13958 | } |
13959 | |
13960 | ::std::vector<at::Tensor> self_; |
13961 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13962 | at::functionalization::impl::sync(self); |
13963 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13964 | } else { |
13965 | self_ = self.vec(); |
13966 | } |
13967 | |
13968 | ::std::vector<at::Tensor> out_; |
13969 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13970 | at::functionalization::impl::sync(out); |
13971 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13972 | } else { |
13973 | out_ = out.vec(); |
13974 | } |
13975 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13976 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
13977 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
13978 | TORCH_INTERNAL_ASSERT(false, |
13979 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13980 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13981 | } else { |
13982 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13983 | at::AutoDispatchSkipFunctionalize guard; |
13984 | at::_ops::_foreach_atan_out::call(self_, out_); |
13986 | } |
13987 | } else { |
13988 | ::std::vector<at::Tensor> tmp_output; |
13989 | { |
13990 | at::AutoDispatchSkipFunctionalize guard; |
13991 | tmp_output = at::_ops::_foreach_atan::call(self_); |
13992 | } |
13993 | at::functionalization::impl::replace_(out, tmp_output); |
13994 | at::functionalization::impl::commit_update(out); |
13995 | at::functionalization::impl::sync(out); |
13996 | |
13997 | } |
13998 | } |
13999 | |
14000 | void _foreach_atan_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { |
14001 | if (true) { |
14002 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14003 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
14004 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
14005 | auto self_meta = to_meta(self); |
14006 | at::AutoDispatchSkipFunctionalize func_guard; |
14007 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14008 | at::_ops::_foreach_atan_::call(self_meta); |
14009 | } |
14010 | |
14011 | ::std::vector<at::Tensor> self_; |
14012 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14013 | at::functionalization::impl::sync(self); |
14014 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14015 | } else { |
14016 | self_ = self.vec(); |
14017 | } |
14018 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
14019 | if ((false)) { |
14020 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
14021 | TORCH_INTERNAL_ASSERT(false, |
14022 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14023 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14024 | } else { |
14025 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14026 | at::AutoDispatchSkipFunctionalize guard; |
14027 | at::_ops::_foreach_atan_::call(self_); |
14029 | } |
14030 | } else { |
14031 | ::std::vector<at::Tensor> tmp_output; |
14032 | { |
14033 | at::AutoDispatchSkipFunctionalize guard; |
14034 | tmp_output = at::_ops::_foreach_atan::call(self_); |
14035 | } |
14036 | at::functionalization::impl::replace_(self, tmp_output); |
14037 | at::functionalization::impl::commit_update(self); |
14038 | at::functionalization::impl::sync(self); |
14039 | |
14040 | } |
14041 | } |
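// Note: the _foreach_* kernels (like the two above) apply the same pattern to at::TensorList
// arguments: unwrapped inputs are materialized as ::std::vector<at::Tensor> (with a .vec()
// copy for non-functional lists), the functional op returns a vector of results, and
// replace_/commit_update/sync are applied element-wise across the whole list. These wrappers
// return void because the underlying _foreach ops do.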
14042 | |
14043 | void _foreach_erf_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { |
14044 | if (false) { |
14045 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14046 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
14047 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
14048 | auto self_meta = to_meta(self); |
14049 | auto out_meta = to_meta(out); |
14050 | at::AutoDispatchSkipFunctionalize func_guard; |
14051 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14052 | at::_ops::_foreach_erf_out::call(self_meta, out_meta); |
14053 | } |
14054 | |
14055 | ::std::vector<at::Tensor> self_; |
14056 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14057 | at::functionalization::impl::sync(self); |
14058 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14059 | } else { |
14060 | self_ = self.vec(); |
14061 | } |
14062 | |
14063 | ::std::vector<at::Tensor> out_; |
14064 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
14065 | at::functionalization::impl::sync(out); |
14066 | out_ = at::functionalization::impl::from_functional_tensor(out); |
14067 | } else { |
14068 | out_ = out.vec(); |
14069 | } |
14070 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
14071 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
14072 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
14073 | TORCH_INTERNAL_ASSERT(false, |
14074 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14075 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14076 | } else { |
14077 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14078 | at::AutoDispatchSkipFunctionalize guard; |
14079 | at::_ops::_foreach_erf_out::call(self_, out_); |
14081 | } |
14082 | } else { |
14083 | ::std::vector<at::Tensor> tmp_output; |
14084 | { |
14085 | at::AutoDispatchSkipFunctionalize guard; |
14086 | tmp_output = at::_ops::_foreach_erf::call(self_); |
14087 | } |
14088 | at::functionalization::impl::replace_(out, tmp_output); |
14089 | at::functionalization::impl::commit_update(out); |
14090 | at::functionalization::impl::sync(out); |
14091 | |
14092 | } |
14093 | } |
14094 | |
14095 | void _foreach_erf_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { |
14096 | if (true) { |
14097 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14098 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
14099 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
14100 | auto self_meta = to_meta(self); |
14101 | at::AutoDispatchSkipFunctionalize func_guard; |
14102 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14103 | at::_ops::_foreach_erf_::call(self_meta); |
14104 | } |
14105 | |
14106 | ::std::vector<at::Tensor> self_; |
14107 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14108 | at::functionalization::impl::sync(self); |
14109 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14110 | } else { |
14111 | self_ = self.vec(); |
14112 | } |
14113 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
14114 | if ((false)) { |
14115 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
14116 | TORCH_INTERNAL_ASSERT(false, |
14117 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14118 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14119 | } else { |
14120 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14121 | at::AutoDispatchSkipFunctionalize guard; |
14122 | at::_ops::_foreach_erf_::call(self_); |
14124 | } |
14125 | } else { |
14126 | ::std::vector<at::Tensor> tmp_output; |
14127 | { |
14128 | at::AutoDispatchSkipFunctionalize guard; |
14129 | tmp_output = at::_ops::_foreach_erf::call(self_); |
14130 | } |
14131 | at::functionalization::impl::replace_(self, tmp_output); |
14132 | at::functionalization::impl::commit_update(self); |
14133 | at::functionalization::impl::sync(self); |
14134 | |
14135 | } |
14136 | } |
14137 | |
14138 | void _foreach_erfc_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { |
14139 | if (false) { |
14140 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14141 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
14142 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
14143 | auto self_meta = to_meta(self); |
14144 | auto out_meta = to_meta(out); |
14145 | at::AutoDispatchSkipFunctionalize func_guard; |
14146 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14147 | at::_ops::_foreach_erfc_out::call(self_meta, out_meta); |
14148 | } |
14149 | |
14150 | ::std::vector<at::Tensor> self_; |
14151 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14152 | at::functionalization::impl::sync(self); |
14153 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14154 | } else { |
14155 | self_ = self.vec(); |
14156 | } |
14157 | |
14158 | ::std::vector<at::Tensor> out_; |
14159 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
14160 | at::functionalization::impl::sync(out); |
14161 | out_ = at::functionalization::impl::from_functional_tensor(out); |
14162 | } else { |
14163 | out_ = out.vec(); |
14164 | } |
14165 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
14166 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
14167 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
14168 | TORCH_INTERNAL_ASSERT(false, |
14169 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14170 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14171 | } else { |
14172 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14173 | at::AutoDispatchSkipFunctionalize guard; |
14174 | at::_ops::_foreach_erfc_out::call(self_, out_); |
14176 | } |
14177 | } else { |
14178 | ::std::vector<at::Tensor> tmp_output; |
14179 | { |
14180 | at::AutoDispatchSkipFunctionalize guard; |
14181 | tmp_output = at::_ops::_foreach_erfc::call(self_); |
14182 | } |
14183 | at::functionalization::impl::replace_(out, tmp_output); |
14184 | at::functionalization::impl::commit_update(out); |
14185 | at::functionalization::impl::sync(out); |
14186 | |
14187 | } |
14188 | } |
14189 | |
14190 | void _foreach_erfc_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { |
14191 | if (true) { |
14192 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14193 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
14194 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
14195 | auto self_meta = to_meta(self); |
14196 | at::AutoDispatchSkipFunctionalize func_guard; |
14197 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14198 | at::_ops::_foreach_erfc_::call(self_meta); |
14199 | } |
14200 | |
14201 | ::std::vector<at::Tensor> self_; |
14202 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14203 | at::functionalization::impl::sync(self); |
14204 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14205 | } else { |
14206 | self_ = self.vec(); |
14207 | } |
14208 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
14209 | if ((false)) { |
14210 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
14211 | TORCH_INTERNAL_ASSERT(false, |
14212 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14213 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14214 | } else { |
14215 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14216 | at::AutoDispatchSkipFunctionalize guard; |
14217 | at::_ops::_foreach_erfc_::call(self_); |
14219 | } |
14220 | } else { |
14221 | ::std::vector<at::Tensor> tmp_output; |
14222 | { |
14223 | at::AutoDispatchSkipFunctionalize guard; |
14224 | tmp_output = at::_ops::_foreach_erfc::call(self_); |
14225 | } |
14226 | at::functionalization::impl::replace_(self, tmp_output); |
14227 | at::functionalization::impl::commit_update(self); |
14228 | at::functionalization::impl::sync(self); |
14229 | |
14230 | } |
14231 | } |
14232 | |
14233 | void _foreach_log_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { |
14234 | if (false) { |
14235 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14236 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
14237 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
14238 | auto self_meta = to_meta(self); |
14239 | auto out_meta = to_meta(out); |
14240 | at::AutoDispatchSkipFunctionalize func_guard; |
14241 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14242 | at::_ops::_foreach_log_out::call(self_meta, out_meta); |
14243 | } |
14244 | |
14245 | ::std::vector<at::Tensor> self_; |
14246 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14247 | at::functionalization::impl::sync(self); |
14248 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14249 | } else { |
14250 | self_ = self.vec(); |
14251 | } |
14252 | |
14253 | ::std::vector<at::Tensor> out_; |
14254 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
14255 | at::functionalization::impl::sync(out); |
14256 | out_ = at::functionalization::impl::from_functional_tensor(out); |
14257 | } else { |
14258 | out_ = out.vec(); |
14259 | } |
14260 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
14261 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
14262 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
14263 | TORCH_INTERNAL_ASSERT(false, |
14264 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14265 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14266 | } else { |
14267 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14268 | at::AutoDispatchSkipFunctionalize guard; |
14269 | at::_ops::_foreach_log_out::call(self_, out_); |
14271 | } |
14272 | } else { |
14273 | ::std::vector<at::Tensor> tmp_output; |
14274 | { |
14275 | at::AutoDispatchSkipFunctionalize guard; |
14276 | tmp_output = at::_ops::_foreach_log::call(self_); |
14277 | } |
14278 | at::functionalization::impl::replace_(out, tmp_output); |
14279 | at::functionalization::impl::commit_update(out); |
14280 | at::functionalization::impl::sync(out); |
14281 | |
14282 | } |
14283 | } |
14284 | |
14285 | void _foreach_log_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { |
14286 | if (true) { |
14287 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14288 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
14289 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
14290 | auto self_meta = to_meta(self); |
14291 | at::AutoDispatchSkipFunctionalize func_guard; |
14292 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14293 | at::_ops::_foreach_log_::call(self_meta); |
14294 | } |
14295 | |
14296 | ::std::vector<at::Tensor> self_; |
14297 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14298 | at::functionalization::impl::sync(self); |
14299 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14300 | } else { |
14301 | self_ = self.vec(); |
14302 | } |
14303 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
14304 | if ((false)) { |
14305 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
14306 | TORCH_INTERNAL_ASSERT(false, |
14307 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14308 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14309 | } else { |
14310 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14311 | at::AutoDispatchSkipFunctionalize guard; |
14312 | at::_ops::_foreach_log_::call(self_); |
14314 | } |
14315 | } else { |
14316 | ::std::vector<at::Tensor> tmp_output; |
14317 | { |
14318 | at::AutoDispatchSkipFunctionalize guard; |
14319 | tmp_output = at::_ops::_foreach_log::call(self_); |
14320 | } |
14321 | at::functionalization::impl::replace_(self, tmp_output); |
14322 | at::functionalization::impl::commit_update(self); |
14323 | at::functionalization::impl::sync(self); |
14324 | |
14325 | } |
14326 | } |
14327 | |
14328 | void _foreach_sinh_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { |
14329 | if (false) { |
14330 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14331 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
14332 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
14333 | auto self_meta = to_meta(self); |
14334 | auto out_meta = to_meta(out); |
14335 | at::AutoDispatchSkipFunctionalize func_guard; |
14336 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14337 | at::_ops::_foreach_sinh_out::call(self_meta, out_meta); |
14338 | } |
14339 | |
14340 | ::std::vector<at::Tensor> self_; |
14341 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14342 | at::functionalization::impl::sync(self); |
14343 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14344 | } else { |
14345 | self_ = self.vec(); |
14346 | } |
14347 | |
14348 | ::std::vector<at::Tensor> out_; |
14349 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
14350 | at::functionalization::impl::sync(out); |
14351 | out_ = at::functionalization::impl::from_functional_tensor(out); |
14352 | } else { |
14353 | out_ = out.vec(); |
14354 | } |
14355 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
14356 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
14357 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
14358 | TORCH_INTERNAL_ASSERT(false, |
14359 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14360 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14361 | } else { |
14362 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14363 | at::AutoDispatchSkipFunctionalize guard; |
14364 | at::_ops::_foreach_sinh_out::call(self_, out_); |
14366 | } |
14367 | } else { |
14368 | ::std::vector<at::Tensor> tmp_output; |
14369 | { |
14370 | at::AutoDispatchSkipFunctionalize guard; |
14371 | tmp_output = at::_ops::_foreach_sinh::call(self_); |
14372 | } |
14373 | at::functionalization::impl::replace_(out, tmp_output); |
14374 | at::functionalization::impl::commit_update(out); |
14375 | at::functionalization::impl::sync(out); |
14376 | |
14377 | } |
14378 | } |
14379 | |
14380 | void _foreach_sinh_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { |
14381 | if (true) { |
14382 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14383 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
14384 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
14385 | auto self_meta = to_meta(self); |
14386 | at::AutoDispatchSkipFunctionalize func_guard; |
14387 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14388 | at::_ops::_foreach_sinh_::call(self_meta); |
14389 | } |
14390 | |
14391 | ::std::vector<at::Tensor> self_; |
14392 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14393 | at::functionalization::impl::sync(self); |
14394 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14395 | } else { |
14396 | self_ = self.vec(); |
14397 | } |
14398 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
14399 | if ((false)) { |
14400 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
14401 | TORCH_INTERNAL_ASSERT(false, |
14402 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14403 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14404 | } else { |
14405 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14406 | at::AutoDispatchSkipFunctionalize guard; |
14407 | at::_ops::_foreach_sinh_::call(self_); |
14409 | } |
14410 | } else { |
14411 | ::std::vector<at::Tensor> tmp_output; |
14412 | { |
14413 | at::AutoDispatchSkipFunctionalize guard; |
14414 | tmp_output = at::_ops::_foreach_sinh::call(self_); |
14415 | } |
14416 | at::functionalization::impl::replace_(self, tmp_output); |
14417 | at::functionalization::impl::commit_update(self); |
14418 | at::functionalization::impl::sync(self); |
14419 | |
14420 | } |
14421 | } |
14422 | |
14423 | void _foreach_lgamma_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { |
14424 | if (false) { |
14425 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14426 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
14427 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
14428 | auto self_meta = to_meta(self); |
14429 | auto out_meta = to_meta(out); |
14430 | at::AutoDispatchSkipFunctionalize func_guard; |
14431 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14432 | at::_ops::_foreach_lgamma_out::call(self_meta, out_meta); |
14433 | } |
14434 | |
14435 | ::std::vector<at::Tensor> self_; |
14436 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14437 | at::functionalization::impl::sync(self); |
14438 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14439 | } else { |
14440 | self_ = self.vec(); |
14441 | } |
14442 | |
14443 | ::std::vector<at::Tensor> out_; |
14444 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
14445 | at::functionalization::impl::sync(out); |
14446 | out_ = at::functionalization::impl::from_functional_tensor(out); |
14447 | } else { |
14448 | out_ = out.vec(); |
14449 | } |
14450 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
14451 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
14452 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
14453 | TORCH_INTERNAL_ASSERT(false, |
14454 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14455 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14456 | } else { |
14457 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14458 | at::AutoDispatchSkipFunctionalize guard; |
14459 | at::_ops::_foreach_lgamma_out::call(self_, out_); |
14460 | ; |
14461 | } |
14462 | } else { |
14463 | ::std::vector<at::Tensor> tmp_output; |
14464 | { |
14465 | at::AutoDispatchSkipFunctionalize guard; |
14466 | tmp_output = at::_ops::_foreach_lgamma::call(self_); |
14467 | } |
14468 | at::functionalization::impl::replace_(out, tmp_output); |
14469 | at::functionalization::impl::commit_update(out); |
14470 | at::functionalization::impl::sync(out); |
14471 | |
14472 | } |
14473 | } |
14474 | |
14475 | void _foreach_lgamma_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { |
14476 | if (true) { |
14477 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14478 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
14480 | auto self_meta = to_meta(self); |
14481 | at::AutoDispatchSkipFunctionalize func_guard; |
14482 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14483 | at::_ops::_foreach_lgamma_::call(self_meta); |
14484 | } |
14485 | |
14486 | ::std::vector<at::Tensor> self_; |
14487 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14488 | at::functionalization::impl::sync(self); |
14489 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14490 | } else { |
14491 | self_ = self.vec(); |
14492 | } |
14493 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
14494 | if ((false)) { |
14495 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
14496 | TORCH_INTERNAL_ASSERT(false, |
14497 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14498 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14499 | } else { |
14500 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14501 | at::AutoDispatchSkipFunctionalize guard; |
14502 | at::_ops::_foreach_lgamma_::call(self_); |
14503 | ; |
14504 | } |
14505 | } else { |
14506 | ::std::vector<at::Tensor> tmp_output; |
14507 | { |
14508 | at::AutoDispatchSkipFunctionalize guard; |
14509 | tmp_output = at::_ops::_foreach_lgamma::call(self_); |
14510 | } |
14511 | at::functionalization::impl::replace_(self, tmp_output); |
14512 | at::functionalization::impl::commit_update(self); |
14513 | at::functionalization::impl::sync(self); |
14514 | |
14515 | } |
14516 | } |
14517 | |
14518 | void _foreach_lerp_out_List_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, at::TensorList weights, at::TensorList out) { |
14519 | if (false) { |
14520 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14521 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
14523 | auto self_meta = to_meta(self); |
14524 | auto tensors1_meta = to_meta(tensors1); |
14525 | auto weights_meta = to_meta(weights); |
14526 | auto out_meta = to_meta(out); |
14527 | at::AutoDispatchSkipFunctionalize func_guard; |
14528 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14529 | at::_ops::_foreach_lerp_List_out::call(self_meta, tensors1_meta, weights_meta, out_meta); |
14530 | } |
14531 | |
14532 | ::std::vector<at::Tensor> self_; |
14533 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14534 | at::functionalization::impl::sync(self); |
14535 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14536 | } else { |
14537 | self_ = self.vec(); |
14538 | } |
14539 | |
14540 | ::std::vector<at::Tensor> tensors1_; |
14541 | if (at::functionalization::impl::isFunctionalTensor(tensors1)) { |
14542 | at::functionalization::impl::sync(tensors1); |
14543 | tensors1_ = at::functionalization::impl::from_functional_tensor(tensors1); |
14544 | } else { |
14545 | tensors1_ = tensors1.vec(); |
14546 | } |
14547 | |
14548 | ::std::vector<at::Tensor> weights_; |
14549 | if (at::functionalization::impl::isFunctionalTensor(weights)) { |
14550 | at::functionalization::impl::sync(weights); |
14551 | weights_ = at::functionalization::impl::from_functional_tensor(weights); |
14552 | } else { |
14553 | weights_ = weights.vec(); |
14554 | } |
14555 | |
14556 | ::std::vector<at::Tensor> out_; |
14557 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
14558 | at::functionalization::impl::sync(out); |
14559 | out_ = at::functionalization::impl::from_functional_tensor(out); |
14560 | } else { |
14561 | out_ = out.vec(); |
14562 | } |
14563 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
14564 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(tensors1) || at::functionalization::impl::isFunctionalTensor(weights))) { |
14565 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
14566 | TORCH_INTERNAL_ASSERT(false, |
14567 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14568 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14569 | } else { |
14570 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14571 | at::AutoDispatchSkipFunctionalize guard; |
14572 | at::_ops::_foreach_lerp_List_out::call(self_, tensors1_, weights_, out_); |
14573 | ; |
14574 | } |
14575 | } else { |
14576 | ::std::vector<at::Tensor> tmp_output; |
14577 | { |
14578 | at::AutoDispatchSkipFunctionalize guard; |
14579 | tmp_output = at::_ops::_foreach_lerp_List::call(self_, tensors1_, weights_); |
14580 | } |
14581 | at::functionalization::impl::replace_(out, tmp_output); |
14582 | at::functionalization::impl::commit_update(out); |
14583 | at::functionalization::impl::sync(out); |
14584 | |
14585 | } |
14586 | } |
14587 | |
14588 | void _foreach_lerp__List(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, at::TensorList weights) { |
14589 | if (true) { |
14590 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14591 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
14593 | auto self_meta = to_meta(self); |
14594 | auto tensors1_meta = to_meta(tensors1); |
14595 | auto weights_meta = to_meta(weights); |
14596 | at::AutoDispatchSkipFunctionalize func_guard; |
14597 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14598 | at::_ops::_foreach_lerp__List::call(self_meta, tensors1_meta, weights_meta); |
14599 | } |
14600 | |
14601 | ::std::vector<at::Tensor> self_; |
14602 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14603 | at::functionalization::impl::sync(self); |
14604 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14605 | } else { |
14606 | self_ = self.vec(); |
14607 | } |
14608 | |
14609 | ::std::vector<at::Tensor> tensors1_; |
14610 | if (at::functionalization::impl::isFunctionalTensor(tensors1)) { |
14611 | at::functionalization::impl::sync(tensors1); |
14612 | tensors1_ = at::functionalization::impl::from_functional_tensor(tensors1); |
14613 | } else { |
14614 | tensors1_ = tensors1.vec(); |
14615 | } |
14616 | |
14617 | ::std::vector<at::Tensor> weights_; |
14618 | if (at::functionalization::impl::isFunctionalTensor(weights)) { |
14619 | at::functionalization::impl::sync(weights); |
14620 | weights_ = at::functionalization::impl::from_functional_tensor(weights); |
14621 | } else { |
14622 | weights_ = weights.vec(); |
14623 | } |
14624 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
14625 | if ((false || at::functionalization::impl::isFunctionalTensor(tensors1) || at::functionalization::impl::isFunctionalTensor(weights))) { |
14626 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
14627 | TORCH_INTERNAL_ASSERT(false, |
14628 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14629 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14630 | } else { |
14631 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14632 | at::AutoDispatchSkipFunctionalize guard; |
14633 | at::_ops::_foreach_lerp__List::call(self_, tensors1_, weights_); |
14634 | ; |
14635 | } |
14636 | } else { |
14637 | ::std::vector<at::Tensor> tmp_output; |
14638 | { |
14639 | at::AutoDispatchSkipFunctionalize guard; |
14640 | tmp_output = at::_ops::_foreach_lerp_List::call(self_, tensors1_, weights_); |
14641 | } |
14642 | at::functionalization::impl::replace_(self, tmp_output); |
14643 | at::functionalization::impl::commit_update(self); |
14644 | at::functionalization::impl::sync(self); |
14645 | |
14646 | } |
14647 | } |
14648 | |
14649 | void _foreach_lerp_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, const at::Scalar & weight, at::TensorList out) { |
14650 | if (false) { |
14651 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14652 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
14654 | auto self_meta = to_meta(self); |
14655 | auto tensors1_meta = to_meta(tensors1); |
14656 | auto out_meta = to_meta(out); |
14657 | at::AutoDispatchSkipFunctionalize func_guard; |
14658 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14659 | at::_ops::_foreach_lerp_Scalar_out::call(self_meta, tensors1_meta, weight, out_meta); |
14660 | } |
14661 | |
14662 | ::std::vector<at::Tensor> self_; |
14663 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14664 | at::functionalization::impl::sync(self); |
14665 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14666 | } else { |
14667 | self_ = self.vec(); |
14668 | } |
14669 | |
14670 | ::std::vector<at::Tensor> tensors1_; |
14671 | if (at::functionalization::impl::isFunctionalTensor(tensors1)) { |
14672 | at::functionalization::impl::sync(tensors1); |
14673 | tensors1_ = at::functionalization::impl::from_functional_tensor(tensors1); |
14674 | } else { |
14675 | tensors1_ = tensors1.vec(); |
14676 | } |
14677 | |
14678 | ::std::vector<at::Tensor> out_; |
14679 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
14680 | at::functionalization::impl::sync(out); |
14681 | out_ = at::functionalization::impl::from_functional_tensor(out); |
14682 | } else { |
14683 | out_ = out.vec(); |
14684 | } |
14685 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
14686 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(tensors1))) { |
14687 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
14688 | TORCH_INTERNAL_ASSERT(false, |
14689 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14690 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14691 | } else { |
14692 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14693 | at::AutoDispatchSkipFunctionalize guard; |
14694 | at::_ops::_foreach_lerp_Scalar_out::call(self_, tensors1_, weight, out_); |
14695 | ; |
14696 | } |
14697 | } else { |
14698 | ::std::vector<at::Tensor> tmp_output; |
14699 | { |
14700 | at::AutoDispatchSkipFunctionalize guard; |
14701 | tmp_output = at::_ops::_foreach_lerp_Scalar::call(self_, tensors1_, weight); |
14702 | } |
14703 | at::functionalization::impl::replace_(out, tmp_output); |
14704 | at::functionalization::impl::commit_update(out); |
14705 | at::functionalization::impl::sync(out); |
14706 | |
14707 | } |
14708 | } |
14709 | |
14710 | void _foreach_lerp__Scalar(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) { |
14711 | if (true) { |
14712 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14713 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
14715 | auto self_meta = to_meta(self); |
14716 | auto tensors1_meta = to_meta(tensors1); |
14717 | at::AutoDispatchSkipFunctionalize func_guard; |
14718 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14719 | at::_ops::_foreach_lerp__Scalar::call(self_meta, tensors1_meta, weight); |
14720 | } |
14721 | |
14722 | ::std::vector<at::Tensor> self_; |
14723 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14724 | at::functionalization::impl::sync(self); |
14725 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14726 | } else { |
14727 | self_ = self.vec(); |
14728 | } |
14729 | |
14730 | ::std::vector<at::Tensor> tensors1_; |
14731 | if (at::functionalization::impl::isFunctionalTensor(tensors1)) { |
14732 | at::functionalization::impl::sync(tensors1); |
14733 | tensors1_ = at::functionalization::impl::from_functional_tensor(tensors1); |
14734 | } else { |
14735 | tensors1_ = tensors1.vec(); |
14736 | } |
14737 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
14738 | if ((false || at::functionalization::impl::isFunctionalTensor(tensors1))) { |
14739 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
14740 | TORCH_INTERNAL_ASSERT(false, |
14741 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14742 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14743 | } else { |
14744 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14745 | at::AutoDispatchSkipFunctionalize guard; |
14746 | at::_ops::_foreach_lerp__Scalar::call(self_, tensors1_, weight); |
14747 | ; |
14748 | } |
14749 | } else { |
14750 | ::std::vector<at::Tensor> tmp_output; |
14751 | { |
14752 | at::AutoDispatchSkipFunctionalize guard; |
14753 | tmp_output = at::_ops::_foreach_lerp_Scalar::call(self_, tensors1_, weight); |
14754 | } |
14755 | at::functionalization::impl::replace_(self, tmp_output); |
14756 | at::functionalization::impl::commit_update(self); |
14757 | at::functionalization::impl::sync(self); |
14758 | |
14759 | } |
14760 | } |
14761 | |
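    // Single-Tensor out= kernels follow the same structure as the foreach ones above, but
    // return `out` by reference. In the no-op branch the redispatched out= op already writes
    // into `out_` (which aliases `out` when `out` is not functional), so the returned
    // tmp_output is left unused.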
14762 | at::Tensor & _convert_indices_from_coo_to_csr_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t size, bool out_int32, at::Tensor & out) { |
14763 | if (false) { |
14764 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14765 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
14767 | auto self_meta = to_meta(self); |
14768 | auto out_meta = to_meta(out); |
14769 | at::AutoDispatchSkipFunctionalize func_guard; |
14770 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14771 | at::_ops::_convert_indices_from_coo_to_csr_out::call(self_meta, size, out_int32, out_meta); |
14772 | } |
14773 | |
14774 | at::Tensor self_; |
14775 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14776 | at::functionalization::impl::sync(self); |
14777 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14778 | } else { |
14779 | self_ = self; |
14780 | } |
14781 | |
14782 | at::Tensor out_; |
14783 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
14784 | at::functionalization::impl::sync(out); |
14785 | out_ = at::functionalization::impl::from_functional_tensor(out); |
14786 | } else { |
14787 | out_ = out; |
14788 | } |
14789 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
14790 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
14791 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
14792 | TORCH_INTERNAL_ASSERT(false, |
14793 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14794 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14795 | } else { |
14796 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14797 | at::AutoDispatchSkipFunctionalize guard; |
14798 | at::Tensor tmp_output = at::_ops::_convert_indices_from_coo_to_csr_out::call(self_, size, out_int32, out_); |
        return out;
14800 | } |
14801 | } else { |
14802 | at::Tensor tmp_output; |
14803 | { |
14804 | at::AutoDispatchSkipFunctionalize guard; |
14805 | tmp_output = at::_ops::_convert_indices_from_coo_to_csr::call(self_, size, out_int32); |
14806 | } |
14807 | at::functionalization::impl::replace_(out, tmp_output); |
14808 | at::functionalization::impl::commit_update(out); |
14809 | at::functionalization::impl::sync(out); |
14810 | return out; |
14811 | } |
14812 | } |
14813 | |
14814 | at::Tensor & multi_margin_loss_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input) { |
14815 | if (false) { |
14816 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14817 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
14819 | auto grad_output_meta = to_meta(grad_output); |
14820 | auto self_meta = to_meta(self); |
14821 | auto target_meta = to_meta(target); |
14822 | auto weight_meta = to_meta(weight); |
14823 | auto grad_input_meta = to_meta(grad_input); |
14824 | at::AutoDispatchSkipFunctionalize func_guard; |
14825 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14826 | at::_ops::multi_margin_loss_backward_grad_input::call(grad_output_meta, self_meta, target_meta, p, margin, weight_meta, reduction, grad_input_meta); |
14827 | } |
14828 | |
14829 | at::Tensor grad_output_; |
14830 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
14831 | at::functionalization::impl::sync(grad_output); |
14832 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
14833 | } else { |
14834 | grad_output_ = grad_output; |
14835 | } |
14836 | |
14837 | at::Tensor self_; |
14838 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14839 | at::functionalization::impl::sync(self); |
14840 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14841 | } else { |
14842 | self_ = self; |
14843 | } |
14844 | |
14845 | at::Tensor target_; |
14846 | if (at::functionalization::impl::isFunctionalTensor(target)) { |
14847 | at::functionalization::impl::sync(target); |
14848 | target_ = at::functionalization::impl::from_functional_tensor(target); |
14849 | } else { |
14850 | target_ = target; |
14851 | } |
14852 | |
14853 | c10::optional<at::Tensor> weight_; |
14854 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
14855 | at::functionalization::impl::sync(weight); |
14856 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
14857 | } else { |
14858 | weight_ = weight; |
14859 | } |
14860 | |
14861 | at::Tensor grad_input_; |
14862 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
14863 | at::functionalization::impl::sync(grad_input); |
14864 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
14865 | } else { |
14866 | grad_input_ = grad_input; |
14867 | } |
14868 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
14869 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target) || at::functionalization::impl::isFunctionalTensor(weight))) { |
14870 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
14871 | TORCH_INTERNAL_ASSERT(false, |
14872 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14873 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14874 | } else { |
14875 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14876 | at::AutoDispatchSkipFunctionalize guard; |
14877 | at::Tensor tmp_output = at::_ops::multi_margin_loss_backward_grad_input::call(grad_output_, self_, target_, p, margin, weight_, reduction, grad_input_); |
        return grad_input;
14879 | } |
14880 | } else { |
14881 | at::Tensor tmp_output; |
14882 | { |
14883 | at::AutoDispatchSkipFunctionalize guard; |
14884 | tmp_output = at::_ops::multi_margin_loss_backward::call(grad_output_, self_, target_, p, margin, weight_, reduction); |
14885 | } |
14886 | at::functionalization::impl::replace_(grad_input, tmp_output); |
14887 | at::functionalization::impl::commit_update(grad_input); |
14888 | at::functionalization::impl::sync(grad_input); |
14889 | return grad_input; |
14890 | } |
14891 | } |
14892 | |
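    // Kernels with multiple out= arguments (here `output` and `total_weight`) commit each
    // result of the functional op separately and return a tuple of references to the
    // original arguments.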
14893 | ::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_out_output(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight) { |
14894 | if (false) { |
14895 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14896 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
14898 | auto self_meta = to_meta(self); |
14899 | auto target_meta = to_meta(target); |
14900 | auto weight_meta = to_meta(weight); |
14901 | auto output_meta = to_meta(output); |
14902 | auto total_weight_meta = to_meta(total_weight); |
14903 | at::AutoDispatchSkipFunctionalize func_guard; |
14904 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14905 | at::_ops::nll_loss_forward_output::call(self_meta, target_meta, weight_meta, reduction, ignore_index, output_meta, total_weight_meta); |
14906 | } |
14907 | |
14908 | at::Tensor self_; |
14909 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14910 | at::functionalization::impl::sync(self); |
14911 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14912 | } else { |
14913 | self_ = self; |
14914 | } |
14915 | |
14916 | at::Tensor target_; |
14917 | if (at::functionalization::impl::isFunctionalTensor(target)) { |
14918 | at::functionalization::impl::sync(target); |
14919 | target_ = at::functionalization::impl::from_functional_tensor(target); |
14920 | } else { |
14921 | target_ = target; |
14922 | } |
14923 | |
14924 | c10::optional<at::Tensor> weight_; |
14925 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
14926 | at::functionalization::impl::sync(weight); |
14927 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
14928 | } else { |
14929 | weight_ = weight; |
14930 | } |
14931 | |
14932 | at::Tensor output_; |
14933 | if (at::functionalization::impl::isFunctionalTensor(output)) { |
14934 | at::functionalization::impl::sync(output); |
14935 | output_ = at::functionalization::impl::from_functional_tensor(output); |
14936 | } else { |
14937 | output_ = output; |
14938 | } |
14939 | |
14940 | at::Tensor total_weight_; |
14941 | if (at::functionalization::impl::isFunctionalTensor(total_weight)) { |
14942 | at::functionalization::impl::sync(total_weight); |
14943 | total_weight_ = at::functionalization::impl::from_functional_tensor(total_weight); |
14944 | } else { |
14945 | total_weight_ = total_weight; |
14946 | } |
14947 | if (!(true && at::functionalization::impl::isFunctionalTensor(output) && at::functionalization::impl::isFunctionalTensor(total_weight))) { |
14948 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target) || at::functionalization::impl::isFunctionalTensor(weight))) { |
14949 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
14950 | TORCH_INTERNAL_ASSERT(false, |
14951 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14952 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14953 | } else { |
14954 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14955 | at::AutoDispatchSkipFunctionalize guard; |
14956 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::nll_loss_forward_output::call(self_, target_, weight_, reduction, ignore_index, output_, total_weight_); |
        return ::std::tuple<at::Tensor &,at::Tensor &>(output, total_weight);
14958 | } |
14959 | } else { |
14960 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
14961 | { |
14962 | at::AutoDispatchSkipFunctionalize guard; |
14963 | tmp_output = at::_ops::nll_loss_forward::call(self_, target_, weight_, reduction, ignore_index); |
14964 | } |
14965 | at::functionalization::impl::replace_(output, std::get<0>(tmp_output)); |
14966 | at::functionalization::impl::commit_update(output); |
14967 | at::functionalization::impl::sync(output); |
14968 | at::functionalization::impl::replace_(total_weight, std::get<1>(tmp_output)); |
14969 | at::functionalization::impl::commit_update(total_weight); |
14970 | at::functionalization::impl::sync(total_weight); |
14971 | return ::std::tuple<at::Tensor &,at::Tensor &>(output, total_weight); |
14972 | } |
14973 | } |
14974 | |
14975 | at::Tensor & hardswish_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
14976 | if (false) { |
14977 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14978 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
14980 | auto self_meta = to_meta(self); |
14981 | auto out_meta = to_meta(out); |
14982 | at::AutoDispatchSkipFunctionalize func_guard; |
14983 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14984 | at::_ops::hardswish_out::call(self_meta, out_meta); |
14985 | } |
14986 | |
14987 | at::Tensor self_; |
14988 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14989 | at::functionalization::impl::sync(self); |
14990 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14991 | } else { |
14992 | self_ = self; |
14993 | } |
14994 | |
14995 | at::Tensor out_; |
14996 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
14997 | at::functionalization::impl::sync(out); |
14998 | out_ = at::functionalization::impl::from_functional_tensor(out); |
14999 | } else { |
15000 | out_ = out; |
15001 | } |
15002 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
15003 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
15004 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15005 | TORCH_INTERNAL_ASSERT(false, |
15006 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15007 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15008 | } else { |
15009 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15010 | at::AutoDispatchSkipFunctionalize guard; |
15011 | at::Tensor tmp_output = at::_ops::hardswish_out::call(self_, out_); |
        return out;
15013 | } |
15014 | } else { |
15015 | at::Tensor tmp_output; |
15016 | { |
15017 | at::AutoDispatchSkipFunctionalize guard; |
15018 | tmp_output = at::_ops::hardswish::call(self_); |
15019 | } |
15020 | at::functionalization::impl::replace_(out, tmp_output); |
15021 | at::functionalization::impl::commit_update(out); |
15022 | at::functionalization::impl::sync(out); |
15023 | return out; |
15024 | } |
15025 | } |
15026 | |
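    // In-place single-Tensor kernels such as hardswish_ keep the meta-tensor pre-check
    // enabled (the `if (true)` guard), since in-place ops support meta tensors; the result
    // of the functional hardswish is then committed back into `self`, and `self` is returned.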
15027 | at::Tensor & hardswish_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
15028 | if (true) { |
15029 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15030 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
15032 | auto self_meta = to_meta(self); |
15033 | at::AutoDispatchSkipFunctionalize func_guard; |
15034 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15035 | at::_ops::hardswish_::call(self_meta); |
15036 | } |
15037 | |
15038 | at::Tensor self_; |
15039 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15040 | at::functionalization::impl::sync(self); |
15041 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15042 | } else { |
15043 | self_ = self; |
15044 | } |
15045 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
15046 | if ((false)) { |
15047 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15048 | TORCH_INTERNAL_ASSERT(false, |
15049 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15050 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15051 | } else { |
15052 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15053 | at::AutoDispatchSkipFunctionalize guard; |
15054 | at::Tensor tmp_output = at::_ops::hardswish_::call(self_); |
        return self;
15056 | } |
15057 | } else { |
15058 | at::Tensor tmp_output; |
15059 | { |
15060 | at::AutoDispatchSkipFunctionalize guard; |
15061 | tmp_output = at::_ops::hardswish::call(self_); |
15062 | } |
15063 | at::functionalization::impl::replace_(self, tmp_output); |
15064 | at::functionalization::impl::commit_update(self); |
15065 | at::functionalization::impl::sync(self); |
15066 | return self; |
15067 | } |
15068 | } |
15069 | |
15070 | at::Tensor & hardswish_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) { |
15071 | if (false) { |
15072 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15073 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
15075 | auto grad_output_meta = to_meta(grad_output); |
15076 | auto self_meta = to_meta(self); |
15077 | auto out_meta = to_meta(out); |
15078 | at::AutoDispatchSkipFunctionalize func_guard; |
15079 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15080 | at::_ops::hardswish_backward_out::call(grad_output_meta, self_meta, out_meta); |
15081 | } |
15082 | |
15083 | at::Tensor grad_output_; |
15084 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
15085 | at::functionalization::impl::sync(grad_output); |
15086 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
15087 | } else { |
15088 | grad_output_ = grad_output; |
15089 | } |
15090 | |
15091 | at::Tensor self_; |
15092 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15093 | at::functionalization::impl::sync(self); |
15094 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15095 | } else { |
15096 | self_ = self; |
15097 | } |
15098 | |
15099 | at::Tensor out_; |
15100 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
15101 | at::functionalization::impl::sync(out); |
15102 | out_ = at::functionalization::impl::from_functional_tensor(out); |
15103 | } else { |
15104 | out_ = out; |
15105 | } |
15106 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
15107 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) { |
15108 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15109 | TORCH_INTERNAL_ASSERT(false, |
15110 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15111 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15112 | } else { |
15113 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15114 | at::AutoDispatchSkipFunctionalize guard; |
15115 | at::Tensor tmp_output = at::_ops::hardswish_backward_out::call(grad_output_, self_, out_); |
        return out;
15117 | } |
15118 | } else { |
15119 | at::Tensor tmp_output; |
15120 | { |
15121 | at::AutoDispatchSkipFunctionalize guard; |
15122 | tmp_output = at::_ops::hardswish_backward::call(grad_output_, self_); |
15123 | } |
15124 | at::functionalization::impl::replace_(out, tmp_output); |
15125 | at::functionalization::impl::commit_update(out); |
15126 | at::functionalization::impl::sync(out); |
15127 | return out; |
15128 | } |
15129 | } |
15130 | |
15131 | at::Tensor & softshrink_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input) { |
15132 | if (false) { |
15133 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15134 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
15136 | auto grad_output_meta = to_meta(grad_output); |
15137 | auto self_meta = to_meta(self); |
15138 | auto grad_input_meta = to_meta(grad_input); |
15139 | at::AutoDispatchSkipFunctionalize func_guard; |
15140 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15141 | at::_ops::softshrink_backward_grad_input::call(grad_output_meta, self_meta, lambd, grad_input_meta); |
15142 | } |
15143 | |
15144 | at::Tensor grad_output_; |
15145 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
15146 | at::functionalization::impl::sync(grad_output); |
15147 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
15148 | } else { |
15149 | grad_output_ = grad_output; |
15150 | } |
15151 | |
15152 | at::Tensor self_; |
15153 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15154 | at::functionalization::impl::sync(self); |
15155 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15156 | } else { |
15157 | self_ = self; |
15158 | } |
15159 | |
15160 | at::Tensor grad_input_; |
15161 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
15162 | at::functionalization::impl::sync(grad_input); |
15163 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
15164 | } else { |
15165 | grad_input_ = grad_input; |
15166 | } |
15167 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
15168 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) { |
15169 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15170 | TORCH_INTERNAL_ASSERT(false, |
15171 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15172 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15173 | } else { |
15174 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15175 | at::AutoDispatchSkipFunctionalize guard; |
15176 | at::Tensor tmp_output = at::_ops::softshrink_backward_grad_input::call(grad_output_, self_, lambd, grad_input_); |
        return grad_input;
15178 | } |
15179 | } else { |
15180 | at::Tensor tmp_output; |
15181 | { |
15182 | at::AutoDispatchSkipFunctionalize guard; |
15183 | tmp_output = at::_ops::softshrink_backward::call(grad_output_, self_, lambd); |
15184 | } |
15185 | at::functionalization::impl::replace_(grad_input, tmp_output); |
15186 | at::functionalization::impl::commit_update(grad_input); |
15187 | at::functionalization::impl::sync(grad_input); |
15188 | return grad_input; |
15189 | } |
15190 | } |
15191 | |
15192 | at::Tensor & mkldnn_adaptive_avg_pool2d_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) { |
15193 | if (false) { |
15194 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15195 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
15197 | auto grad_output_meta = to_meta(grad_output); |
15198 | auto self_meta = to_meta(self); |
15199 | auto out_meta = to_meta(out); |
15200 | at::AutoDispatchSkipFunctionalize func_guard; |
15201 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15202 | at::_ops::mkldnn_adaptive_avg_pool2d_backward_out::call(grad_output_meta, self_meta, out_meta); |
15203 | } |
15204 | |
15205 | at::Tensor grad_output_; |
15206 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
15207 | at::functionalization::impl::sync(grad_output); |
15208 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
15209 | } else { |
15210 | grad_output_ = grad_output; |
15211 | } |
15212 | |
15213 | at::Tensor self_; |
15214 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15215 | at::functionalization::impl::sync(self); |
15216 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15217 | } else { |
15218 | self_ = self; |
15219 | } |
15220 | |
15221 | at::Tensor out_; |
15222 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
15223 | at::functionalization::impl::sync(out); |
15224 | out_ = at::functionalization::impl::from_functional_tensor(out); |
15225 | } else { |
15226 | out_ = out; |
15227 | } |
15228 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
15229 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) { |
15230 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15231 | TORCH_INTERNAL_ASSERT(false, |
15232 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15233 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15234 | } else { |
15235 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15236 | at::AutoDispatchSkipFunctionalize guard; |
15237 | at::Tensor tmp_output = at::_ops::mkldnn_adaptive_avg_pool2d_backward_out::call(grad_output_, self_, out_); |
        return out;
15239 | } |
15240 | } else { |
15241 | at::Tensor tmp_output; |
15242 | { |
15243 | at::AutoDispatchSkipFunctionalize guard; |
15244 | tmp_output = at::_ops::mkldnn_adaptive_avg_pool2d_backward::call(grad_output_, self_); |
15245 | } |
15246 | at::functionalization::impl::replace_(out, tmp_output); |
15247 | at::functionalization::impl::commit_update(out); |
15248 | at::functionalization::impl::sync(out); |
15249 | return out; |
15250 | } |
15251 | } |
15252 | |
15253 | at::Tensor & _adaptive_avg_pool2d_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) { |
15254 | if (false) { |
15255 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15256 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
15258 | auto grad_output_meta = to_meta(grad_output); |
15259 | auto self_meta = to_meta(self); |
15260 | auto out_meta = to_meta(out); |
15261 | at::AutoDispatchSkipFunctionalize func_guard; |
15262 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15263 | at::_ops::_adaptive_avg_pool2d_backward_out::call(grad_output_meta, self_meta, out_meta); |
15264 | } |
15265 | |
15266 | at::Tensor grad_output_; |
15267 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
15268 | at::functionalization::impl::sync(grad_output); |
15269 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
15270 | } else { |
15271 | grad_output_ = grad_output; |
15272 | } |
15273 | |
15274 | at::Tensor self_; |
15275 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15276 | at::functionalization::impl::sync(self); |
15277 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15278 | } else { |
15279 | self_ = self; |
15280 | } |
15281 | |
15282 | at::Tensor out_; |
15283 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
15284 | at::functionalization::impl::sync(out); |
15285 | out_ = at::functionalization::impl::from_functional_tensor(out); |
15286 | } else { |
15287 | out_ = out; |
15288 | } |
15289 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
15290 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) { |
15291 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15292 | TORCH_INTERNAL_ASSERT(false, |
15293 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15294 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15295 | } else { |
15296 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15297 | at::AutoDispatchSkipFunctionalize guard; |
15298 | at::Tensor tmp_output = at::_ops::_adaptive_avg_pool2d_backward_out::call(grad_output_, self_, out_); |
        return out;
15300 | } |
15301 | } else { |
15302 | at::Tensor tmp_output; |
15303 | { |
15304 | at::AutoDispatchSkipFunctionalize guard; |
15305 | tmp_output = at::_ops::_adaptive_avg_pool2d_backward::call(grad_output_, self_); |
15306 | } |
15307 | at::functionalization::impl::replace_(out, tmp_output); |
15308 | at::functionalization::impl::commit_update(out); |
15309 | at::functionalization::impl::sync(out); |
15310 | return out; |
15311 | } |
15312 | } |
15313 | |
15314 | at::Tensor & adaptive_avg_pool3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) { |
15315 | if (false) { |
15316 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15317 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
15319 | auto self_meta = to_meta(self); |
15320 | auto out_meta = to_meta(out); |
15321 | at::AutoDispatchSkipFunctionalize func_guard; |
15322 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15323 | at::_ops::adaptive_avg_pool3d_out::call(self_meta, output_size, out_meta); |
15324 | } |
15325 | |
15326 | at::Tensor self_; |
15327 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15328 | at::functionalization::impl::sync(self); |
15329 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15330 | } else { |
15331 | self_ = self; |
15332 | } |
15333 | |
15334 | at::Tensor out_; |
15335 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
15336 | at::functionalization::impl::sync(out); |
15337 | out_ = at::functionalization::impl::from_functional_tensor(out); |
15338 | } else { |
15339 | out_ = out; |
15340 | } |
15341 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
15342 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
15343 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15344 | TORCH_INTERNAL_ASSERT(false, |
15345 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15346 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15347 | } else { |
15348 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15349 | at::AutoDispatchSkipFunctionalize guard; |
15350 | at::Tensor tmp_output = at::_ops::adaptive_avg_pool3d_out::call(self_, output_size, out_); |
        return out;
15352 | } |
15353 | } else { |
15354 | at::Tensor tmp_output; |
15355 | { |
15356 | at::AutoDispatchSkipFunctionalize guard; |
15357 | tmp_output = at::_ops::adaptive_avg_pool3d::call(self_, output_size); |
15358 | } |
15359 | at::functionalization::impl::replace_(out, tmp_output); |
15360 | at::functionalization::impl::commit_update(out); |
15361 | at::functionalization::impl::sync(out); |
15362 | return out; |
15363 | } |
15364 | } |
15365 | |
15366 | at::Tensor & adaptive_max_pool3d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input) { |
15367 | if (false) { |
15368 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15369 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
15371 | auto grad_output_meta = to_meta(grad_output); |
15372 | auto self_meta = to_meta(self); |
15373 | auto indices_meta = to_meta(indices); |
15374 | auto grad_input_meta = to_meta(grad_input); |
15375 | at::AutoDispatchSkipFunctionalize func_guard; |
15376 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15377 | at::_ops::adaptive_max_pool3d_backward_grad_input::call(grad_output_meta, self_meta, indices_meta, grad_input_meta); |
15378 | } |
15379 | |
15380 | at::Tensor grad_output_; |
15381 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
15382 | at::functionalization::impl::sync(grad_output); |
15383 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
15384 | } else { |
15385 | grad_output_ = grad_output; |
15386 | } |
15387 | |
15388 | at::Tensor self_; |
15389 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15390 | at::functionalization::impl::sync(self); |
15391 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15392 | } else { |
15393 | self_ = self; |
15394 | } |
15395 | |
15396 | at::Tensor indices_; |
15397 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
15398 | at::functionalization::impl::sync(indices); |
15399 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
15400 | } else { |
15401 | indices_ = indices; |
15402 | } |
15403 | |
15404 | at::Tensor grad_input_; |
15405 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
15406 | at::functionalization::impl::sync(grad_input); |
15407 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
15408 | } else { |
15409 | grad_input_ = grad_input; |
15410 | } |
15411 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
15412 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(indices))) { |
15413 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15414 | TORCH_INTERNAL_ASSERT(false, |
15415 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15416 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15417 | } else { |
15418 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15419 | at::AutoDispatchSkipFunctionalize guard; |
15420 | at::Tensor tmp_output = at::_ops::adaptive_max_pool3d_backward_grad_input::call(grad_output_, self_, indices_, grad_input_); |
        return grad_input;
15422 | } |
15423 | } else { |
15424 | at::Tensor tmp_output; |
15425 | { |
15426 | at::AutoDispatchSkipFunctionalize guard; |
15427 | tmp_output = at::_ops::adaptive_max_pool3d_backward::call(grad_output_, self_, indices_); |
15428 | } |
15429 | at::functionalization::impl::replace_(grad_input, tmp_output); |
15430 | at::functionalization::impl::commit_update(grad_input); |
15431 | at::functionalization::impl::sync(grad_input); |
15432 | return grad_input; |
15433 | } |
15434 | } |
15435 | |
15436 | at::Tensor & avg_pool2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & out) { |
15437 | if (false) { |
15438 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15439 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
15441 | auto self_meta = to_meta(self); |
15442 | auto out_meta = to_meta(out); |
15443 | at::AutoDispatchSkipFunctionalize func_guard; |
15444 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15445 | at::_ops::avg_pool2d_out::call(self_meta, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out_meta); |
15446 | } |
15447 | |
15448 | at::Tensor self_; |
15449 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15450 | at::functionalization::impl::sync(self); |
15451 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15452 | } else { |
15453 | self_ = self; |
15454 | } |
15455 | |
15456 | at::Tensor out_; |
15457 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
15458 | at::functionalization::impl::sync(out); |
15459 | out_ = at::functionalization::impl::from_functional_tensor(out); |
15460 | } else { |
15461 | out_ = out; |
15462 | } |
15463 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
15464 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
15465 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15466 | TORCH_INTERNAL_ASSERT(false, |
15467 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15468 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15469 | } else { |
15470 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15471 | at::AutoDispatchSkipFunctionalize guard; |
15472 | at::Tensor tmp_output = at::_ops::avg_pool2d_out::call(self_, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out_); |
        return out;
15474 | } |
15475 | } else { |
15476 | at::Tensor tmp_output; |
15477 | { |
15478 | at::AutoDispatchSkipFunctionalize guard; |
15479 | tmp_output = at::_ops::avg_pool2d::call(self_, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); |
15480 | } |
15481 | at::functionalization::impl::replace_(out, tmp_output); |
15482 | at::functionalization::impl::commit_update(out); |
15483 | at::functionalization::impl::sync(out); |
15484 | return out; |
15485 | } |
15486 | } |
15487 | |
15488 | at::Tensor & avg_pool3d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & grad_input) { |
15489 | if (false) { |
15490 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15491 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
      // (We can only do this for inplace ops today though, because they technically all support meta tensors).
15493 | auto grad_output_meta = to_meta(grad_output); |
15494 | auto self_meta = to_meta(self); |
15495 | auto grad_input_meta = to_meta(grad_input); |
15496 | at::AutoDispatchSkipFunctionalize func_guard; |
15497 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15498 | at::_ops::avg_pool3d_backward_grad_input::call(grad_output_meta, self_meta, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input_meta); |
15499 | } |
15500 | |
15501 | at::Tensor grad_output_; |
15502 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
15503 | at::functionalization::impl::sync(grad_output); |
15504 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
15505 | } else { |
15506 | grad_output_ = grad_output; |
15507 | } |
15508 | |
15509 | at::Tensor self_; |
15510 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15511 | at::functionalization::impl::sync(self); |
15512 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15513 | } else { |
15514 | self_ = self; |
15515 | } |
15516 | |
15517 | at::Tensor grad_input_; |
15518 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
15519 | at::functionalization::impl::sync(grad_input); |
15520 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
15521 | } else { |
15522 | grad_input_ = grad_input; |
15523 | } |
15524 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
15525 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) { |
15526 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15527 | TORCH_INTERNAL_ASSERT(false, |
15528 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15529 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15530 | } else { |
15531 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15532 | at::AutoDispatchSkipFunctionalize guard; |
15533 | at::Tensor tmp_output = at::_ops::avg_pool3d_backward_grad_input::call(grad_output_, self_, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input_); |
        return grad_input;
15535 | } |
15536 | } else { |
15537 | at::Tensor tmp_output; |
15538 | { |
15539 | at::AutoDispatchSkipFunctionalize guard; |
15540 | tmp_output = at::_ops::avg_pool3d_backward::call(grad_output_, self_, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); |
15541 | } |
15542 | at::functionalization::impl::replace_(grad_input, tmp_output); |
15543 | at::functionalization::impl::commit_update(grad_input); |
15544 | at::functionalization::impl::sync(grad_input); |
15545 | return grad_input; |
15546 | } |
15547 | } |
15548 | |
15549 | ::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool2d_out_output(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices) { |
15550 | if (false) { |
15551 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15552 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
15554 | auto self_meta = to_meta(self); |
15555 | auto random_samples_meta = to_meta(random_samples); |
15556 | auto output_meta = to_meta(output); |
15557 | auto indices_meta = to_meta(indices); |
15558 | at::AutoDispatchSkipFunctionalize func_guard; |
15559 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15560 | at::_ops::fractional_max_pool2d_output::call(self_meta, kernel_size, output_size, random_samples_meta, output_meta, indices_meta); |
15561 | } |
15562 | |
15563 | at::Tensor self_; |
15564 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15565 | at::functionalization::impl::sync(self); |
15566 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15567 | } else { |
15568 | self_ = self; |
15569 | } |
15570 | |
15571 | at::Tensor random_samples_; |
15572 | if (at::functionalization::impl::isFunctionalTensor(random_samples)) { |
15573 | at::functionalization::impl::sync(random_samples); |
15574 | random_samples_ = at::functionalization::impl::from_functional_tensor(random_samples); |
15575 | } else { |
15576 | random_samples_ = random_samples; |
15577 | } |
15578 | |
15579 | at::Tensor output_; |
15580 | if (at::functionalization::impl::isFunctionalTensor(output)) { |
15581 | at::functionalization::impl::sync(output); |
15582 | output_ = at::functionalization::impl::from_functional_tensor(output); |
15583 | } else { |
15584 | output_ = output; |
15585 | } |
15586 | |
15587 | at::Tensor indices_; |
15588 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
15589 | at::functionalization::impl::sync(indices); |
15590 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
15591 | } else { |
15592 | indices_ = indices; |
15593 | } |
15594 | if (!(true && at::functionalization::impl::isFunctionalTensor(output) && at::functionalization::impl::isFunctionalTensor(indices))) { |
15595 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(random_samples))) { |
15596 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15597 | TORCH_INTERNAL_ASSERT(false, |
15598 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15599 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15600 | } else { |
15601 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15602 | at::AutoDispatchSkipFunctionalize guard; |
15603 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::fractional_max_pool2d_output::call(self_, kernel_size, output_size, random_samples_, output_, indices_); |
        return ::std::tuple<at::Tensor &,at::Tensor &>(output, indices);
15605 | } |
15606 | } else { |
15607 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
15608 | { |
15609 | at::AutoDispatchSkipFunctionalize guard; |
15610 | tmp_output = at::_ops::fractional_max_pool2d::call(self_, kernel_size, output_size, random_samples_); |
15611 | } |
15612 | at::functionalization::impl::replace_(output, std::get<0>(tmp_output)); |
15613 | at::functionalization::impl::commit_update(output); |
15614 | at::functionalization::impl::sync(output); |
15615 | at::functionalization::impl::replace_(indices, std::get<1>(tmp_output)); |
15616 | at::functionalization::impl::commit_update(indices); |
15617 | at::functionalization::impl::sync(indices); |
15618 | return ::std::tuple<at::Tensor &,at::Tensor &>(output, indices); |
15619 | } |
15620 | } |
15621 | |
15622 | at::Tensor & reflection_pad3d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) { |
15623 | if (false) { |
15624 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15625 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
15627 | auto grad_output_meta = to_meta(grad_output); |
15628 | auto self_meta = to_meta(self); |
15629 | auto grad_input_meta = to_meta(grad_input); |
15630 | at::AutoDispatchSkipFunctionalize func_guard; |
15631 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15632 | at::_ops::reflection_pad3d_backward_grad_input::call(grad_output_meta, self_meta, padding, grad_input_meta); |
15633 | } |
15634 | |
15635 | at::Tensor grad_output_; |
15636 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
15637 | at::functionalization::impl::sync(grad_output); |
15638 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
15639 | } else { |
15640 | grad_output_ = grad_output; |
15641 | } |
15642 | |
15643 | at::Tensor self_; |
15644 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15645 | at::functionalization::impl::sync(self); |
15646 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15647 | } else { |
15648 | self_ = self; |
15649 | } |
15650 | |
15651 | at::Tensor grad_input_; |
15652 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
15653 | at::functionalization::impl::sync(grad_input); |
15654 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
15655 | } else { |
15656 | grad_input_ = grad_input; |
15657 | } |
15658 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
15659 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) { |
15660 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15661 | TORCH_INTERNAL_ASSERT(false, |
15662 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15663 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15664 | } else { |
15665 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15666 | at::AutoDispatchSkipFunctionalize guard; |
15667 | at::Tensor tmp_output = at::_ops::reflection_pad3d_backward_grad_input::call(grad_output_, self_, padding, grad_input_); |
        return grad_input;
15669 | } |
15670 | } else { |
15671 | at::Tensor tmp_output; |
15672 | { |
15673 | at::AutoDispatchSkipFunctionalize guard; |
15674 | tmp_output = at::_ops::reflection_pad3d_backward::call(grad_output_, self_, padding); |
15675 | } |
15676 | at::functionalization::impl::replace_(grad_input, tmp_output); |
15677 | at::functionalization::impl::commit_update(grad_input); |
15678 | at::functionalization::impl::sync(grad_input); |
15679 | return grad_input; |
15680 | } |
15681 | } |
15682 | |
15683 | at::Tensor & replication_pad2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) { |
15684 | if (false) { |
15685 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15686 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
15688 | auto self_meta = to_meta(self); |
15689 | auto out_meta = to_meta(out); |
15690 | at::AutoDispatchSkipFunctionalize func_guard; |
15691 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15692 | at::_ops::replication_pad2d_out::call(self_meta, padding, out_meta); |
15693 | } |
15694 | |
15695 | at::Tensor self_; |
15696 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15697 | at::functionalization::impl::sync(self); |
15698 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15699 | } else { |
15700 | self_ = self; |
15701 | } |
15702 | |
15703 | at::Tensor out_; |
15704 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
15705 | at::functionalization::impl::sync(out); |
15706 | out_ = at::functionalization::impl::from_functional_tensor(out); |
15707 | } else { |
15708 | out_ = out; |
15709 | } |
15710 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
15711 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
15712 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15713 | TORCH_INTERNAL_ASSERT(false, |
15714 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15715 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15716 | } else { |
15717 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15718 | at::AutoDispatchSkipFunctionalize guard; |
15719 | at::Tensor tmp_output = at::_ops::replication_pad2d_out::call(self_, padding, out_); |
        return out;
15721 | } |
15722 | } else { |
15723 | at::Tensor tmp_output; |
15724 | { |
15725 | at::AutoDispatchSkipFunctionalize guard; |
15726 | tmp_output = at::_ops::replication_pad2d::call(self_, padding); |
15727 | } |
15728 | at::functionalization::impl::replace_(out, tmp_output); |
15729 | at::functionalization::impl::commit_update(out); |
15730 | at::functionalization::impl::sync(out); |
15731 | return out; |
15732 | } |
15733 | } |
15734 | |
15735 | at::Tensor & _upsample_bilinear2d_aa_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) { |
15736 | if (false) { |
15737 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15738 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
15740 | auto grad_output_meta = to_meta(grad_output); |
15741 | auto grad_input_meta = to_meta(grad_input); |
15742 | at::AutoDispatchSkipFunctionalize func_guard; |
15743 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15744 | at::_ops::_upsample_bilinear2d_aa_backward_grad_input::call(grad_output_meta, output_size, input_size, align_corners, scales_h, scales_w, grad_input_meta); |
15745 | } |
15746 | |
15747 | at::Tensor grad_output_; |
15748 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
15749 | at::functionalization::impl::sync(grad_output); |
15750 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
15751 | } else { |
15752 | grad_output_ = grad_output; |
15753 | } |
15754 | |
15755 | at::Tensor grad_input_; |
15756 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
15757 | at::functionalization::impl::sync(grad_input); |
15758 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
15759 | } else { |
15760 | grad_input_ = grad_input; |
15761 | } |
15762 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
15763 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output))) { |
15764 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15765 | TORCH_INTERNAL_ASSERT(false, |
15766 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15767 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15768 | } else { |
15769 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15770 | at::AutoDispatchSkipFunctionalize guard; |
15771 | at::Tensor tmp_output = at::_ops::_upsample_bilinear2d_aa_backward_grad_input::call(grad_output_, output_size, input_size, align_corners, scales_h, scales_w, grad_input_); |
        return grad_input;
15773 | } |
15774 | } else { |
15775 | at::Tensor tmp_output; |
15776 | { |
15777 | at::AutoDispatchSkipFunctionalize guard; |
15778 | tmp_output = at::_ops::_upsample_bilinear2d_aa_backward::call(grad_output_, output_size, input_size, align_corners, scales_h, scales_w); |
15779 | } |
15780 | at::functionalization::impl::replace_(grad_input, tmp_output); |
15781 | at::functionalization::impl::commit_update(grad_input); |
15782 | at::functionalization::impl::sync(grad_input); |
15783 | return grad_input; |
15784 | } |
15785 | } |
15786 | |
15787 | at::Tensor & _upsample_bicubic2d_aa_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) { |
15788 | if (false) { |
15789 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15790 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
15792 | auto self_meta = to_meta(self); |
15793 | auto out_meta = to_meta(out); |
15794 | at::AutoDispatchSkipFunctionalize func_guard; |
15795 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15796 | at::_ops::_upsample_bicubic2d_aa_out::call(self_meta, output_size, align_corners, scales_h, scales_w, out_meta); |
15797 | } |
15798 | |
15799 | at::Tensor self_; |
15800 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15801 | at::functionalization::impl::sync(self); |
15802 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15803 | } else { |
15804 | self_ = self; |
15805 | } |
15806 | |
15807 | at::Tensor out_; |
15808 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
15809 | at::functionalization::impl::sync(out); |
15810 | out_ = at::functionalization::impl::from_functional_tensor(out); |
15811 | } else { |
15812 | out_ = out; |
15813 | } |
15814 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
15815 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
15816 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15817 | TORCH_INTERNAL_ASSERT(false, |
15818 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15819 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15820 | } else { |
15821 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15822 | at::AutoDispatchSkipFunctionalize guard; |
15823 | at::Tensor tmp_output = at::_ops::_upsample_bicubic2d_aa_out::call(self_, output_size, align_corners, scales_h, scales_w, out_); |
        return out;
15825 | } |
15826 | } else { |
15827 | at::Tensor tmp_output; |
15828 | { |
15829 | at::AutoDispatchSkipFunctionalize guard; |
15830 | tmp_output = at::_ops::_upsample_bicubic2d_aa::call(self_, output_size, align_corners, scales_h, scales_w); |
15831 | } |
15832 | at::functionalization::impl::replace_(out, tmp_output); |
15833 | at::functionalization::impl::commit_update(out); |
15834 | at::functionalization::impl::sync(out); |
15835 | return out; |
15836 | } |
15837 | } |
15838 | |
15839 | at::Tensor & upsample_trilinear3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) { |
15840 | if (false) { |
15841 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15842 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
15844 | auto self_meta = to_meta(self); |
15845 | auto out_meta = to_meta(out); |
15846 | at::AutoDispatchSkipFunctionalize func_guard; |
15847 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15848 | at::_ops::upsample_trilinear3d_out::call(self_meta, output_size, align_corners, scales_d, scales_h, scales_w, out_meta); |
15849 | } |
15850 | |
15851 | at::Tensor self_; |
15852 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15853 | at::functionalization::impl::sync(self); |
15854 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15855 | } else { |
15856 | self_ = self; |
15857 | } |
15858 | |
15859 | at::Tensor out_; |
15860 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
15861 | at::functionalization::impl::sync(out); |
15862 | out_ = at::functionalization::impl::from_functional_tensor(out); |
15863 | } else { |
15864 | out_ = out; |
15865 | } |
15866 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
15867 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
15868 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15869 | TORCH_INTERNAL_ASSERT(false, |
15870 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15871 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15872 | } else { |
15873 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15874 | at::AutoDispatchSkipFunctionalize guard; |
15875 | at::Tensor tmp_output = at::_ops::upsample_trilinear3d_out::call(self_, output_size, align_corners, scales_d, scales_h, scales_w, out_); |
        return out;
15877 | } |
15878 | } else { |
15879 | at::Tensor tmp_output; |
15880 | { |
15881 | at::AutoDispatchSkipFunctionalize guard; |
15882 | tmp_output = at::_ops::upsample_trilinear3d::call(self_, output_size, align_corners, scales_d, scales_h, scales_w); |
15883 | } |
15884 | at::functionalization::impl::replace_(out, tmp_output); |
15885 | at::functionalization::impl::commit_update(out); |
15886 | at::functionalization::impl::sync(out); |
15887 | return out; |
15888 | } |
15889 | } |
15890 | |
15891 | at::Tensor & _upsample_nearest_exact1d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input) { |
15892 | if (false) { |
15893 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15894 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
15896 | auto grad_output_meta = to_meta(grad_output); |
15897 | auto grad_input_meta = to_meta(grad_input); |
15898 | at::AutoDispatchSkipFunctionalize func_guard; |
15899 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15900 | at::_ops::_upsample_nearest_exact1d_backward_grad_input::call(grad_output_meta, output_size, input_size, scales, grad_input_meta); |
15901 | } |
15902 | |
15903 | at::Tensor grad_output_; |
15904 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
15905 | at::functionalization::impl::sync(grad_output); |
15906 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
15907 | } else { |
15908 | grad_output_ = grad_output; |
15909 | } |
15910 | |
15911 | at::Tensor grad_input_; |
15912 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
15913 | at::functionalization::impl::sync(grad_input); |
15914 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
15915 | } else { |
15916 | grad_input_ = grad_input; |
15917 | } |
15918 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
15919 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output))) { |
15920 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15921 | TORCH_INTERNAL_ASSERT(false, |
15922 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15923 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15924 | } else { |
15925 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15926 | at::AutoDispatchSkipFunctionalize guard; |
15927 | at::Tensor tmp_output = at::_ops::_upsample_nearest_exact1d_backward_grad_input::call(grad_output_, output_size, input_size, scales, grad_input_); |
        return grad_input;
15929 | } |
15930 | } else { |
15931 | at::Tensor tmp_output; |
15932 | { |
15933 | at::AutoDispatchSkipFunctionalize guard; |
15934 | tmp_output = at::_ops::_upsample_nearest_exact1d_backward::call(grad_output_, output_size, input_size, scales); |
15935 | } |
15936 | at::functionalization::impl::replace_(grad_input, tmp_output); |
15937 | at::functionalization::impl::commit_update(grad_input); |
15938 | at::functionalization::impl::sync(grad_input); |
15939 | return grad_input; |
15940 | } |
15941 | } |
15942 | |
15943 | at::Tensor & upsample_nearest2d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) { |
15944 | if (false) { |
15945 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15946 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
15948 | auto grad_output_meta = to_meta(grad_output); |
15949 | auto grad_input_meta = to_meta(grad_input); |
15950 | at::AutoDispatchSkipFunctionalize func_guard; |
15951 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15952 | at::_ops::upsample_nearest2d_backward_grad_input::call(grad_output_meta, output_size, input_size, scales_h, scales_w, grad_input_meta); |
15953 | } |
15954 | |
15955 | at::Tensor grad_output_; |
15956 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
15957 | at::functionalization::impl::sync(grad_output); |
15958 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
15959 | } else { |
15960 | grad_output_ = grad_output; |
15961 | } |
15962 | |
15963 | at::Tensor grad_input_; |
15964 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
15965 | at::functionalization::impl::sync(grad_input); |
15966 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
15967 | } else { |
15968 | grad_input_ = grad_input; |
15969 | } |
15970 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
15971 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output))) { |
15972 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15973 | TORCH_INTERNAL_ASSERT(false, |
15974 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15975 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15976 | } else { |
15977 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15978 | at::AutoDispatchSkipFunctionalize guard; |
15979 | at::Tensor tmp_output = at::_ops::upsample_nearest2d_backward_grad_input::call(grad_output_, output_size, input_size, scales_h, scales_w, grad_input_); |
        return grad_input;
15981 | } |
15982 | } else { |
15983 | at::Tensor tmp_output; |
15984 | { |
15985 | at::AutoDispatchSkipFunctionalize guard; |
15986 | tmp_output = at::_ops::upsample_nearest2d_backward::call(grad_output_, output_size, input_size, scales_h, scales_w); |
15987 | } |
15988 | at::functionalization::impl::replace_(grad_input, tmp_output); |
15989 | at::functionalization::impl::commit_update(grad_input); |
15990 | at::functionalization::impl::sync(grad_input); |
15991 | return grad_input; |
15992 | } |
15993 | } |
15994 | |
15995 | at::Tensor & tanh_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input) { |
15996 | if (false) { |
15997 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15998 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16000 | auto grad_output_meta = to_meta(grad_output); |
16001 | auto output_meta = to_meta(output); |
16002 | auto grad_input_meta = to_meta(grad_input); |
16003 | at::AutoDispatchSkipFunctionalize func_guard; |
16004 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16005 | at::_ops::tanh_backward_grad_input::call(grad_output_meta, output_meta, grad_input_meta); |
16006 | } |
16007 | |
16008 | at::Tensor grad_output_; |
16009 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
16010 | at::functionalization::impl::sync(grad_output); |
16011 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
16012 | } else { |
16013 | grad_output_ = grad_output; |
16014 | } |
16015 | |
16016 | at::Tensor output_; |
16017 | if (at::functionalization::impl::isFunctionalTensor(output)) { |
16018 | at::functionalization::impl::sync(output); |
16019 | output_ = at::functionalization::impl::from_functional_tensor(output); |
16020 | } else { |
16021 | output_ = output; |
16022 | } |
16023 | |
16024 | at::Tensor grad_input_; |
16025 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
16026 | at::functionalization::impl::sync(grad_input); |
16027 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
16028 | } else { |
16029 | grad_input_ = grad_input; |
16030 | } |
16031 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
16032 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(output))) { |
16033 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
16034 | TORCH_INTERNAL_ASSERT(false, |
16035 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16036 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16037 | } else { |
16038 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16039 | at::AutoDispatchSkipFunctionalize guard; |
16040 | at::Tensor tmp_output = at::_ops::tanh_backward_grad_input::call(grad_output_, output_, grad_input_); |
        return grad_input;
16042 | } |
16043 | } else { |
16044 | at::Tensor tmp_output; |
16045 | { |
16046 | at::AutoDispatchSkipFunctionalize guard; |
16047 | tmp_output = at::_ops::tanh_backward::call(grad_output_, output_); |
16048 | } |
16049 | at::functionalization::impl::replace_(grad_input, tmp_output); |
16050 | at::functionalization::impl::commit_update(grad_input); |
16051 | at::functionalization::impl::sync(grad_input); |
16052 | return grad_input; |
16053 | } |
16054 | } |
16055 | |
16056 | const at::Tensor & _conv_depthwise2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, const at::Tensor & out) { |
16057 | if (false) { |
16058 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16059 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16061 | auto self_meta = to_meta(self); |
16062 | auto weight_meta = to_meta(weight); |
16063 | auto bias_meta = to_meta(bias); |
16064 | auto out_meta = to_meta(out); |
16065 | at::AutoDispatchSkipFunctionalize func_guard; |
16066 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16067 | at::_ops::_conv_depthwise2d_out::call(self_meta, weight_meta, kernel_size, bias_meta, stride, padding, dilation, out_meta); |
16068 | } |
16069 | |
16070 | at::Tensor self_; |
16071 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16072 | at::functionalization::impl::sync(self); |
16073 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16074 | } else { |
16075 | self_ = self; |
16076 | } |
16077 | |
16078 | at::Tensor weight_; |
16079 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
16080 | at::functionalization::impl::sync(weight); |
16081 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
16082 | } else { |
16083 | weight_ = weight; |
16084 | } |
16085 | |
16086 | c10::optional<at::Tensor> bias_; |
16087 | if (at::functionalization::impl::isFunctionalTensor(bias)) { |
16088 | at::functionalization::impl::sync(bias); |
16089 | bias_ = at::functionalization::impl::from_functional_tensor(bias); |
16090 | } else { |
16091 | bias_ = bias; |
16092 | } |
16093 | |
16094 | at::Tensor out_; |
16095 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16096 | at::functionalization::impl::sync(out); |
16097 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16098 | } else { |
16099 | out_ = out; |
16100 | } |
16101 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16102 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) { |
16103 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
16104 | TORCH_INTERNAL_ASSERT(false, |
16105 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16106 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16107 | } else { |
16108 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16109 | at::AutoDispatchSkipFunctionalize guard; |
16110 | at::Tensor tmp_output = at::_ops::_conv_depthwise2d_out::call(self_, weight_, kernel_size, bias_, stride, padding, dilation, out_); |
        return out;
16112 | } |
16113 | } else { |
16114 | at::Tensor tmp_output; |
16115 | { |
16116 | at::AutoDispatchSkipFunctionalize guard; |
16117 | tmp_output = at::_ops::_conv_depthwise2d::call(self_, weight_, kernel_size, bias_, stride, padding, dilation); |
16118 | } |
16119 | at::functionalization::impl::replace_(out, tmp_output); |
16120 | at::functionalization::impl::commit_update(out); |
16121 | at::functionalization::impl::sync(out); |
16122 | return out; |
16123 | } |
16124 | } |
16125 | |
16126 | at::Tensor & col2im_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) { |
16127 | if (false) { |
16128 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16129 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16131 | auto self_meta = to_meta(self); |
16132 | auto out_meta = to_meta(out); |
16133 | at::AutoDispatchSkipFunctionalize func_guard; |
16134 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16135 | at::_ops::col2im_out::call(self_meta, output_size, kernel_size, dilation, padding, stride, out_meta); |
16136 | } |
16137 | |
16138 | at::Tensor self_; |
16139 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16140 | at::functionalization::impl::sync(self); |
16141 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16142 | } else { |
16143 | self_ = self; |
16144 | } |
16145 | |
16146 | at::Tensor out_; |
16147 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16148 | at::functionalization::impl::sync(out); |
16149 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16150 | } else { |
16151 | out_ = out; |
16152 | } |
16153 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16154 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
16155 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
16156 | TORCH_INTERNAL_ASSERT(false, |
16157 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16158 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16159 | } else { |
16160 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16161 | at::AutoDispatchSkipFunctionalize guard; |
16162 | at::Tensor tmp_output = at::_ops::col2im_out::call(self_, output_size, kernel_size, dilation, padding, stride, out_); |
        return out;
16164 | } |
16165 | } else { |
16166 | at::Tensor tmp_output; |
16167 | { |
16168 | at::AutoDispatchSkipFunctionalize guard; |
16169 | tmp_output = at::_ops::col2im::call(self_, output_size, kernel_size, dilation, padding, stride); |
16170 | } |
16171 | at::functionalization::impl::replace_(out, tmp_output); |
16172 | at::functionalization::impl::commit_update(out); |
16173 | at::functionalization::impl::sync(out); |
16174 | return out; |
16175 | } |
16176 | } |
16177 | |
16178 | at::Tensor & column_stack_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) { |
16179 | if (false) { |
16180 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16181 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16183 | auto tensors_meta = to_meta(tensors); |
16184 | auto out_meta = to_meta(out); |
16185 | at::AutoDispatchSkipFunctionalize func_guard; |
16186 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16187 | at::_ops::column_stack_out::call(tensors_meta, out_meta); |
16188 | } |
16189 | |
16190 | ::std::vector<at::Tensor> tensors_; |
16191 | if (at::functionalization::impl::isFunctionalTensor(tensors)) { |
16192 | at::functionalization::impl::sync(tensors); |
16193 | tensors_ = at::functionalization::impl::from_functional_tensor(tensors); |
16194 | } else { |
16195 | tensors_ = tensors.vec(); |
16196 | } |
16197 | |
16198 | at::Tensor out_; |
16199 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16200 | at::functionalization::impl::sync(out); |
16201 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16202 | } else { |
16203 | out_ = out; |
16204 | } |
16205 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16206 | if ((false || at::functionalization::impl::isFunctionalTensor(tensors))) { |
16207 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
16208 | TORCH_INTERNAL_ASSERT(false, |
16209 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16210 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16211 | } else { |
16212 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16213 | at::AutoDispatchSkipFunctionalize guard; |
16214 | at::Tensor tmp_output = at::_ops::column_stack_out::call(tensors_, out_); |
        return out;
16216 | } |
16217 | } else { |
16218 | at::Tensor tmp_output; |
16219 | { |
16220 | at::AutoDispatchSkipFunctionalize guard; |
16221 | tmp_output = at::_ops::column_stack::call(tensors_); |
16222 | } |
16223 | at::functionalization::impl::replace_(out, tmp_output); |
16224 | at::functionalization::impl::commit_update(out); |
16225 | at::functionalization::impl::sync(out); |
16226 | return out; |
16227 | } |
16228 | } |
16229 | |
16230 | at::Tensor & im2col_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) { |
16231 | if (false) { |
16232 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16233 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16235 | auto self_meta = to_meta(self); |
16236 | auto out_meta = to_meta(out); |
16237 | at::AutoDispatchSkipFunctionalize func_guard; |
16238 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16239 | at::_ops::im2col_out::call(self_meta, kernel_size, dilation, padding, stride, out_meta); |
16240 | } |
16241 | |
16242 | at::Tensor self_; |
16243 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16244 | at::functionalization::impl::sync(self); |
16245 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16246 | } else { |
16247 | self_ = self; |
16248 | } |
16249 | |
16250 | at::Tensor out_; |
16251 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16252 | at::functionalization::impl::sync(out); |
16253 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16254 | } else { |
16255 | out_ = out; |
16256 | } |
16257 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16258 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
16259 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
16260 | TORCH_INTERNAL_ASSERT(false, |
16261 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16262 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16263 | } else { |
16264 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16265 | at::AutoDispatchSkipFunctionalize guard; |
16266 | at::Tensor tmp_output = at::_ops::im2col_out::call(self_, kernel_size, dilation, padding, stride, out_); |
        return out;
16268 | } |
16269 | } else { |
16270 | at::Tensor tmp_output; |
16271 | { |
16272 | at::AutoDispatchSkipFunctionalize guard; |
16273 | tmp_output = at::_ops::im2col::call(self_, kernel_size, dilation, padding, stride); |
16274 | } |
16275 | at::functionalization::impl::replace_(out, tmp_output); |
16276 | at::functionalization::impl::commit_update(out); |
16277 | at::functionalization::impl::sync(out); |
16278 | return out; |
16279 | } |
16280 | } |
16281 | |
16282 | at::Tensor & isinf_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
16283 | if (false) { |
16284 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16285 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16287 | auto self_meta = to_meta(self); |
16288 | auto out_meta = to_meta(out); |
16289 | at::AutoDispatchSkipFunctionalize func_guard; |
16290 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16291 | at::_ops::isinf_out::call(self_meta, out_meta); |
16292 | } |
16293 | |
16294 | at::Tensor self_; |
16295 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16296 | at::functionalization::impl::sync(self); |
16297 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16298 | } else { |
16299 | self_ = self; |
16300 | } |
16301 | |
16302 | at::Tensor out_; |
16303 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16304 | at::functionalization::impl::sync(out); |
16305 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16306 | } else { |
16307 | out_ = out; |
16308 | } |
16309 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16310 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
16311 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
16312 | TORCH_INTERNAL_ASSERT(false, |
16313 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16314 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16315 | } else { |
16316 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16317 | at::AutoDispatchSkipFunctionalize guard; |
16318 | at::Tensor tmp_output = at::_ops::isinf_out::call(self_, out_); |
        return out;
16320 | } |
16321 | } else { |
16322 | at::Tensor tmp_output; |
16323 | { |
16324 | at::AutoDispatchSkipFunctionalize guard; |
16325 | tmp_output = at::_ops::isinf::call(self_); |
16326 | } |
16327 | at::functionalization::impl::replace_(out, tmp_output); |
16328 | at::functionalization::impl::commit_update(out); |
16329 | at::functionalization::impl::sync(out); |
16330 | return out; |
16331 | } |
16332 | } |
16333 | |
16334 | at::Tensor & isneginf_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
16335 | if (false) { |
16336 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16337 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16339 | auto self_meta = to_meta(self); |
16340 | auto out_meta = to_meta(out); |
16341 | at::AutoDispatchSkipFunctionalize func_guard; |
16342 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16343 | at::_ops::isneginf_out::call(self_meta, out_meta); |
16344 | } |
16345 | |
16346 | at::Tensor self_; |
16347 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16348 | at::functionalization::impl::sync(self); |
16349 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16350 | } else { |
16351 | self_ = self; |
16352 | } |
16353 | |
16354 | at::Tensor out_; |
16355 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16356 | at::functionalization::impl::sync(out); |
16357 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16358 | } else { |
16359 | out_ = out; |
16360 | } |
16361 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16362 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
16363 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
16364 | TORCH_INTERNAL_ASSERT(false, |
16365 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16366 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16367 | } else { |
16368 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16369 | at::AutoDispatchSkipFunctionalize guard; |
16370 | at::Tensor tmp_output = at::_ops::isneginf_out::call(self_, out_); |
        return out;
16372 | } |
16373 | } else { |
16374 | at::Tensor tmp_output; |
16375 | { |
16376 | at::AutoDispatchSkipFunctionalize guard; |
16377 | tmp_output = at::_ops::isneginf::call(self_); |
16378 | } |
16379 | at::functionalization::impl::replace_(out, tmp_output); |
16380 | at::functionalization::impl::commit_update(out); |
16381 | at::functionalization::impl::sync(out); |
16382 | return out; |
16383 | } |
16384 | } |
16385 | |
16386 | at::Tensor & special_expm1_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
16387 | if (false) { |
16388 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16389 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16391 | auto self_meta = to_meta(self); |
16392 | auto out_meta = to_meta(out); |
16393 | at::AutoDispatchSkipFunctionalize func_guard; |
16394 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16395 | at::_ops::special_expm1_out::call(self_meta, out_meta); |
16396 | } |
16397 | |
16398 | at::Tensor self_; |
16399 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16400 | at::functionalization::impl::sync(self); |
16401 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16402 | } else { |
16403 | self_ = self; |
16404 | } |
16405 | |
16406 | at::Tensor out_; |
16407 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16408 | at::functionalization::impl::sync(out); |
16409 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16410 | } else { |
16411 | out_ = out; |
16412 | } |
16413 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16414 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
16415 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
16416 | TORCH_INTERNAL_ASSERT(false, |
16417 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16418 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16419 | } else { |
16420 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16421 | at::AutoDispatchSkipFunctionalize guard; |
16422 | at::Tensor tmp_output = at::_ops::special_expm1_out::call(self_, out_); |
        return out;
16424 | } |
16425 | } else { |
16426 | at::Tensor tmp_output; |
16427 | { |
16428 | at::AutoDispatchSkipFunctionalize guard; |
16429 | tmp_output = at::_ops::special_expm1::call(self_); |
16430 | } |
16431 | at::functionalization::impl::replace_(out, tmp_output); |
16432 | at::functionalization::impl::commit_update(out); |
16433 | at::functionalization::impl::sync(out); |
16434 | return out; |
16435 | } |
16436 | } |
16437 | |
16438 | at::Tensor & special_erf_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
16439 | if (false) { |
16440 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16441 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16443 | auto self_meta = to_meta(self); |
16444 | auto out_meta = to_meta(out); |
16445 | at::AutoDispatchSkipFunctionalize func_guard; |
16446 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16447 | at::_ops::special_erf_out::call(self_meta, out_meta); |
16448 | } |
16449 | |
16450 | at::Tensor self_; |
16451 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16452 | at::functionalization::impl::sync(self); |
16453 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16454 | } else { |
16455 | self_ = self; |
16456 | } |
16457 | |
16458 | at::Tensor out_; |
16459 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16460 | at::functionalization::impl::sync(out); |
16461 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16462 | } else { |
16463 | out_ = out; |
16464 | } |
16465 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16466 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
16467 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
16468 | TORCH_INTERNAL_ASSERT(false, |
16469 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16470 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16471 | } else { |
16472 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16473 | at::AutoDispatchSkipFunctionalize guard; |
16474 | at::Tensor tmp_output = at::_ops::special_erf_out::call(self_, out_); |
        return out;
16476 | } |
16477 | } else { |
16478 | at::Tensor tmp_output; |
16479 | { |
16480 | at::AutoDispatchSkipFunctionalize guard; |
16481 | tmp_output = at::_ops::special_erf::call(self_); |
16482 | } |
16483 | at::functionalization::impl::replace_(out, tmp_output); |
16484 | at::functionalization::impl::commit_update(out); |
16485 | at::functionalization::impl::sync(out); |
16486 | return out; |
16487 | } |
16488 | } |
16489 | |
16490 | at::Tensor & special_logsumexp_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) { |
16491 | if (false) { |
16492 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16493 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16495 | auto self_meta = to_meta(self); |
16496 | auto out_meta = to_meta(out); |
16497 | at::AutoDispatchSkipFunctionalize func_guard; |
16498 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16499 | at::_ops::special_logsumexp_out::call(self_meta, dim, keepdim, out_meta); |
16500 | } |
16501 | |
16502 | at::Tensor self_; |
16503 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16504 | at::functionalization::impl::sync(self); |
16505 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16506 | } else { |
16507 | self_ = self; |
16508 | } |
16509 | |
16510 | at::Tensor out_; |
16511 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16512 | at::functionalization::impl::sync(out); |
16513 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16514 | } else { |
16515 | out_ = out; |
16516 | } |
16517 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16518 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
16519 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
16520 | TORCH_INTERNAL_ASSERT(false, |
16521 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16522 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16523 | } else { |
16524 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16525 | at::AutoDispatchSkipFunctionalize guard; |
16526 | at::Tensor tmp_output = at::_ops::special_logsumexp_out::call(self_, dim, keepdim, out_); |
        return out;
16528 | } |
16529 | } else { |
16530 | at::Tensor tmp_output; |
16531 | { |
16532 | at::AutoDispatchSkipFunctionalize guard; |
16533 | tmp_output = at::_ops::special_logsumexp::call(self_, dim, keepdim); |
16534 | } |
16535 | at::functionalization::impl::replace_(out, tmp_output); |
16536 | at::functionalization::impl::commit_update(out); |
16537 | at::functionalization::impl::sync(out); |
16538 | return out; |
16539 | } |
16540 | } |
16541 | |
16542 | at::Tensor & special_log1p_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
16543 | if (false) { |
16544 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16545 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16547 | auto self_meta = to_meta(self); |
16548 | auto out_meta = to_meta(out); |
16549 | at::AutoDispatchSkipFunctionalize func_guard; |
16550 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16551 | at::_ops::special_log1p_out::call(self_meta, out_meta); |
16552 | } |
16553 | |
16554 | at::Tensor self_; |
16555 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16556 | at::functionalization::impl::sync(self); |
16557 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16558 | } else { |
16559 | self_ = self; |
16560 | } |
16561 | |
16562 | at::Tensor out_; |
16563 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16564 | at::functionalization::impl::sync(out); |
16565 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16566 | } else { |
16567 | out_ = out; |
16568 | } |
16569 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16570 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
16571 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
16572 | TORCH_INTERNAL_ASSERT(false, |
16573 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16574 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16575 | } else { |
16576 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16577 | at::AutoDispatchSkipFunctionalize guard; |
16578 | at::Tensor tmp_output = at::_ops::special_log1p_out::call(self_, out_); |
16579 |       return out; |
16580 | } |
16581 | } else { |
16582 | at::Tensor tmp_output; |
16583 | { |
16584 | at::AutoDispatchSkipFunctionalize guard; |
16585 | tmp_output = at::_ops::special_log1p::call(self_); |
16586 | } |
16587 | at::functionalization::impl::replace_(out, tmp_output); |
16588 | at::functionalization::impl::commit_update(out); |
16589 | at::functionalization::impl::sync(out); |
16590 | return out; |
16591 | } |
16592 | } |
16593 | |
16594 | at::Tensor & special_gammaincc_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
16595 | if (false) { |
16596 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16597 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16598 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16599 | auto self_meta = to_meta(self); |
16600 | auto other_meta = to_meta(other); |
16601 | auto out_meta = to_meta(out); |
16602 | at::AutoDispatchSkipFunctionalize func_guard; |
16603 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16604 | at::_ops::special_gammaincc_out::call(self_meta, other_meta, out_meta); |
16605 | } |
16606 | |
16607 | at::Tensor self_; |
16608 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16609 | at::functionalization::impl::sync(self); |
16610 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16611 | } else { |
16612 | self_ = self; |
16613 | } |
16614 | |
16615 | at::Tensor other_; |
16616 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
16617 | at::functionalization::impl::sync(other); |
16618 | other_ = at::functionalization::impl::from_functional_tensor(other); |
16619 | } else { |
16620 | other_ = other; |
16621 | } |
16622 | |
16623 | at::Tensor out_; |
16624 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16625 | at::functionalization::impl::sync(out); |
16626 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16627 | } else { |
16628 | out_ = out; |
16629 | } |
16630 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16631 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
16632 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
16633 | TORCH_INTERNAL_ASSERT(false, |
16634 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16635 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16636 | } else { |
16637 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16638 | at::AutoDispatchSkipFunctionalize guard; |
16639 | at::Tensor tmp_output = at::_ops::special_gammaincc_out::call(self_, other_, out_); |
16640 |       return out; |
16641 | } |
16642 | } else { |
16643 | at::Tensor tmp_output; |
16644 | { |
16645 | at::AutoDispatchSkipFunctionalize guard; |
16646 | tmp_output = at::_ops::special_gammaincc::call(self_, other_); |
16647 | } |
16648 | at::functionalization::impl::replace_(out, tmp_output); |
16649 | at::functionalization::impl::commit_update(out); |
16650 | at::functionalization::impl::sync(out); |
16651 | return out; |
16652 | } |
16653 | } |
16654 | |
16655 | at::Tensor & special_multigammaln_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t p, at::Tensor & out) { |
16656 | if (false) { |
16657 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16658 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16659 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16660 | auto self_meta = to_meta(self); |
16661 | auto out_meta = to_meta(out); |
16662 | at::AutoDispatchSkipFunctionalize func_guard; |
16663 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16664 | at::_ops::special_multigammaln_out::call(self_meta, p, out_meta); |
16665 | } |
16666 | |
16667 | at::Tensor self_; |
16668 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16669 | at::functionalization::impl::sync(self); |
16670 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16671 | } else { |
16672 | self_ = self; |
16673 | } |
16674 | |
16675 | at::Tensor out_; |
16676 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16677 | at::functionalization::impl::sync(out); |
16678 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16679 | } else { |
16680 | out_ = out; |
16681 | } |
16682 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16683 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
16684 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
16685 | TORCH_INTERNAL_ASSERT(false, |
16686 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16687 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16688 | } else { |
16689 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16690 | at::AutoDispatchSkipFunctionalize guard; |
16691 | at::Tensor tmp_output = at::_ops::special_multigammaln_out::call(self_, p, out_); |
16692 |       return out; |
16693 | } |
16694 | } else { |
16695 | at::Tensor tmp_output; |
16696 | { |
16697 | at::AutoDispatchSkipFunctionalize guard; |
16698 | tmp_output = at::_ops::special_multigammaln::call(self_, p); |
16699 | } |
16700 | at::functionalization::impl::replace_(out, tmp_output); |
16701 | at::functionalization::impl::commit_update(out); |
16702 | at::functionalization::impl::sync(out); |
16703 | return out; |
16704 | } |
16705 | } |
16706 | |
16707 | at::Tensor & fft_rfft2_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) { |
16708 | if (false) { |
16709 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16710 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16711 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16712 | auto self_meta = to_meta(self); |
16713 | auto out_meta = to_meta(out); |
16714 | at::AutoDispatchSkipFunctionalize func_guard; |
16715 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16716 | at::_ops::fft_rfft2_out::call(self_meta, s, dim, norm, out_meta); |
16717 | } |
16718 | |
16719 | at::Tensor self_; |
16720 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16721 | at::functionalization::impl::sync(self); |
16722 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16723 | } else { |
16724 | self_ = self; |
16725 | } |
16726 | |
16727 | at::Tensor out_; |
16728 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16729 | at::functionalization::impl::sync(out); |
16730 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16731 | } else { |
16732 | out_ = out; |
16733 | } |
16734 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16735 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
16736 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
16737 | TORCH_INTERNAL_ASSERT(false, |
16738 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16739 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16740 | } else { |
16741 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16742 | at::AutoDispatchSkipFunctionalize guard; |
16743 | at::Tensor tmp_output = at::_ops::fft_rfft2_out::call(self_, s, dim, norm, out_); |
16744 |       return out; |
16745 | } |
16746 | } else { |
16747 | at::Tensor tmp_output; |
16748 | { |
16749 | at::AutoDispatchSkipFunctionalize guard; |
16750 | tmp_output = at::_ops::fft_rfft2::call(self_, s, dim, norm); |
16751 | } |
16752 | at::functionalization::impl::replace_(out, tmp_output); |
16753 | at::functionalization::impl::commit_update(out); |
16754 | at::functionalization::impl::sync(out); |
16755 | return out; |
16756 | } |
16757 | } |
16758 | |
16759 | at::Tensor & fft_irfft2_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) { |
16760 | if (false) { |
16761 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16762 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16763 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16764 | auto self_meta = to_meta(self); |
16765 | auto out_meta = to_meta(out); |
16766 | at::AutoDispatchSkipFunctionalize func_guard; |
16767 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16768 | at::_ops::fft_irfft2_out::call(self_meta, s, dim, norm, out_meta); |
16769 | } |
16770 | |
16771 | at::Tensor self_; |
16772 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16773 | at::functionalization::impl::sync(self); |
16774 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16775 | } else { |
16776 | self_ = self; |
16777 | } |
16778 | |
16779 | at::Tensor out_; |
16780 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16781 | at::functionalization::impl::sync(out); |
16782 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16783 | } else { |
16784 | out_ = out; |
16785 | } |
16786 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16787 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
16788 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
16789 | TORCH_INTERNAL_ASSERT(false, |
16790 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16791 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16792 | } else { |
16793 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16794 | at::AutoDispatchSkipFunctionalize guard; |
16795 | at::Tensor tmp_output = at::_ops::fft_irfft2_out::call(self_, s, dim, norm, out_); |
16796 |       return out; |
16797 | } |
16798 | } else { |
16799 | at::Tensor tmp_output; |
16800 | { |
16801 | at::AutoDispatchSkipFunctionalize guard; |
16802 | tmp_output = at::_ops::fft_irfft2::call(self_, s, dim, norm); |
16803 | } |
16804 | at::functionalization::impl::replace_(out, tmp_output); |
16805 | at::functionalization::impl::commit_update(out); |
16806 | at::functionalization::impl::sync(out); |
16807 | return out; |
16808 | } |
16809 | } |
16810 | |
16811 | const at::Tensor & fft_ihfft2_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) { |
16812 | if (false) { |
16813 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16814 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16815 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16816 | auto self_meta = to_meta(self); |
16817 | auto out_meta = to_meta(out); |
16818 | at::AutoDispatchSkipFunctionalize func_guard; |
16819 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16820 | at::_ops::fft_ihfft2_out::call(self_meta, s, dim, norm, out_meta); |
16821 | } |
16822 | |
16823 | at::Tensor self_; |
16824 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16825 | at::functionalization::impl::sync(self); |
16826 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16827 | } else { |
16828 | self_ = self; |
16829 | } |
16830 | |
16831 | at::Tensor out_; |
16832 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16833 | at::functionalization::impl::sync(out); |
16834 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16835 | } else { |
16836 | out_ = out; |
16837 | } |
16838 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16839 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
16840 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
16841 | TORCH_INTERNAL_ASSERT(false, |
16842 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16843 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16844 | } else { |
16845 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16846 | at::AutoDispatchSkipFunctionalize guard; |
16847 | at::Tensor tmp_output = at::_ops::fft_ihfft2_out::call(self_, s, dim, norm, out_); |
16848 |       return out; |
16849 | } |
16850 | } else { |
16851 | at::Tensor tmp_output; |
16852 | { |
16853 | at::AutoDispatchSkipFunctionalize guard; |
16854 | tmp_output = at::_ops::fft_ihfft2::call(self_, s, dim, norm); |
16855 | } |
16856 | at::functionalization::impl::replace_(out, tmp_output); |
16857 | at::functionalization::impl::commit_update(out); |
16858 | at::functionalization::impl::sync(out); |
16859 | return out; |
16860 | } |
16861 | } |
16862 | |
16863 | at::Tensor & fft_fftn_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) { |
16864 | if (false) { |
16865 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16866 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16867 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16868 | auto self_meta = to_meta(self); |
16869 | auto out_meta = to_meta(out); |
16870 | at::AutoDispatchSkipFunctionalize func_guard; |
16871 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16872 | at::_ops::fft_fftn_out::call(self_meta, s, dim, norm, out_meta); |
16873 | } |
16874 | |
16875 | at::Tensor self_; |
16876 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16877 | at::functionalization::impl::sync(self); |
16878 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16879 | } else { |
16880 | self_ = self; |
16881 | } |
16882 | |
16883 | at::Tensor out_; |
16884 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16885 | at::functionalization::impl::sync(out); |
16886 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16887 | } else { |
16888 | out_ = out; |
16889 | } |
16890 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16891 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
16892 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
16893 | TORCH_INTERNAL_ASSERT(false, |
16894 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16895 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16896 | } else { |
16897 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16898 | at::AutoDispatchSkipFunctionalize guard; |
16899 | at::Tensor tmp_output = at::_ops::fft_fftn_out::call(self_, s, dim, norm, out_); |
16900 |       return out; |
16901 | } |
16902 | } else { |
16903 | at::Tensor tmp_output; |
16904 | { |
16905 | at::AutoDispatchSkipFunctionalize guard; |
16906 | tmp_output = at::_ops::fft_fftn::call(self_, s, dim, norm); |
16907 | } |
16908 | at::functionalization::impl::replace_(out, tmp_output); |
16909 | at::functionalization::impl::commit_update(out); |
16910 | at::functionalization::impl::sync(out); |
16911 | return out; |
16912 | } |
16913 | } |
16914 | |
16915 | at::Tensor & fft_irfftn_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) { |
16916 | if (false) { |
16917 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16918 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16919 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16920 | auto self_meta = to_meta(self); |
16921 | auto out_meta = to_meta(out); |
16922 | at::AutoDispatchSkipFunctionalize func_guard; |
16923 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16924 | at::_ops::fft_irfftn_out::call(self_meta, s, dim, norm, out_meta); |
16925 | } |
16926 | |
16927 | at::Tensor self_; |
16928 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16929 | at::functionalization::impl::sync(self); |
16930 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16931 | } else { |
16932 | self_ = self; |
16933 | } |
16934 | |
16935 | at::Tensor out_; |
16936 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16937 | at::functionalization::impl::sync(out); |
16938 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16939 | } else { |
16940 | out_ = out; |
16941 | } |
16942 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16943 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
16944 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
16945 | TORCH_INTERNAL_ASSERT(false, |
16946 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16947 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16948 | } else { |
16949 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16950 | at::AutoDispatchSkipFunctionalize guard; |
16951 | at::Tensor tmp_output = at::_ops::fft_irfftn_out::call(self_, s, dim, norm, out_); |
16952 |       return out; |
16953 | } |
16954 | } else { |
16955 | at::Tensor tmp_output; |
16956 | { |
16957 | at::AutoDispatchSkipFunctionalize guard; |
16958 | tmp_output = at::_ops::fft_irfftn::call(self_, s, dim, norm); |
16959 | } |
16960 | at::functionalization::impl::replace_(out, tmp_output); |
16961 | at::functionalization::impl::commit_update(out); |
16962 | at::functionalization::impl::sync(out); |
16963 | return out; |
16964 | } |
16965 | } |
16966 | |
16967 | ::std::tuple<at::Tensor &,at::Tensor &> linalg_lu_factor_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots) { |
16968 | if (false) { |
16969 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16970 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16971 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16972 | auto A_meta = to_meta(A); |
16973 | auto LU_meta = to_meta(LU); |
16974 | auto pivots_meta = to_meta(pivots); |
16975 | at::AutoDispatchSkipFunctionalize func_guard; |
16976 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16977 | at::_ops::linalg_lu_factor_out::call(A_meta, pivot, LU_meta, pivots_meta); |
16978 | } |
16979 | |
16980 | at::Tensor A_; |
16981 | if (at::functionalization::impl::isFunctionalTensor(A)) { |
16982 | at::functionalization::impl::sync(A); |
16983 | A_ = at::functionalization::impl::from_functional_tensor(A); |
16984 | } else { |
16985 | A_ = A; |
16986 | } |
16987 | |
16988 | at::Tensor LU_; |
16989 | if (at::functionalization::impl::isFunctionalTensor(LU)) { |
16990 | at::functionalization::impl::sync(LU); |
16991 | LU_ = at::functionalization::impl::from_functional_tensor(LU); |
16992 | } else { |
16993 | LU_ = LU; |
16994 | } |
16995 | |
16996 | at::Tensor pivots_; |
16997 | if (at::functionalization::impl::isFunctionalTensor(pivots)) { |
16998 | at::functionalization::impl::sync(pivots); |
16999 | pivots_ = at::functionalization::impl::from_functional_tensor(pivots); |
17000 | } else { |
17001 | pivots_ = pivots; |
17002 | } |
17003 | if (!(true && at::functionalization::impl::isFunctionalTensor(LU) && at::functionalization::impl::isFunctionalTensor(pivots))) { |
17004 | if ((false || at::functionalization::impl::isFunctionalTensor(A))) { |
17005 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17006 | TORCH_INTERNAL_ASSERT(false, |
17007 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17008 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17009 | } else { |
17010 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17011 | at::AutoDispatchSkipFunctionalize guard; |
17012 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::linalg_lu_factor_out::call(A_, pivot, LU_, pivots_); |
17013 |       return ::std::tuple<at::Tensor &,at::Tensor &>(LU, pivots); |
17014 | } |
17015 | } else { |
17016 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
17017 | { |
17018 | at::AutoDispatchSkipFunctionalize guard; |
17019 | tmp_output = at::_ops::linalg_lu_factor::call(A_, pivot); |
17020 | } |
17021 | at::functionalization::impl::replace_(LU, std::get<0>(tmp_output)); |
17022 | at::functionalization::impl::commit_update(LU); |
17023 | at::functionalization::impl::sync(LU); |
17024 | at::functionalization::impl::replace_(pivots, std::get<1>(tmp_output)); |
17025 | at::functionalization::impl::commit_update(pivots); |
17026 | at::functionalization::impl::sync(pivots); |
17027 | return ::std::tuple<at::Tensor &,at::Tensor &>(LU, pivots); |
17028 | } |
17029 | } |
17030 | |
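      | // Multi-output factorizations such as linalg_lu_factor above follow the same recipe, except that |
      | // the functional variant returns a tuple and each element is committed into its own output before |
      | // the reference tuple is rebuilt. Illustrative fragment only (it mirrors the kernel above and is |
      | // not an additional registration): |
      | // |
      | //   at::functionalization::impl::replace_(LU, std::get<0>(tmp_output));      // first output |
      | //   at::functionalization::impl::commit_update(LU); |
      | //   at::functionalization::impl::replace_(pivots, std::get<1>(tmp_output));  // second output |
      | //   at::functionalization::impl::commit_update(pivots); |
      | //   return ::std::tuple<at::Tensor &,at::Tensor &>(LU, pivots); |
      |  |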
17031 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_factor_ex_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot, bool check_errors, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info) { |
17032 | if (false) { |
17033 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17034 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17035 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17036 | auto A_meta = to_meta(A); |
17037 | auto LU_meta = to_meta(LU); |
17038 | auto pivots_meta = to_meta(pivots); |
17039 | auto info_meta = to_meta(info); |
17040 | at::AutoDispatchSkipFunctionalize func_guard; |
17041 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17042 | at::_ops::linalg_lu_factor_ex_out::call(A_meta, pivot, check_errors, LU_meta, pivots_meta, info_meta); |
17043 | } |
17044 | |
17045 | at::Tensor A_; |
17046 | if (at::functionalization::impl::isFunctionalTensor(A)) { |
17047 | at::functionalization::impl::sync(A); |
17048 | A_ = at::functionalization::impl::from_functional_tensor(A); |
17049 | } else { |
17050 | A_ = A; |
17051 | } |
17052 | |
17053 | at::Tensor LU_; |
17054 | if (at::functionalization::impl::isFunctionalTensor(LU)) { |
17055 | at::functionalization::impl::sync(LU); |
17056 | LU_ = at::functionalization::impl::from_functional_tensor(LU); |
17057 | } else { |
17058 | LU_ = LU; |
17059 | } |
17060 | |
17061 | at::Tensor pivots_; |
17062 | if (at::functionalization::impl::isFunctionalTensor(pivots)) { |
17063 | at::functionalization::impl::sync(pivots); |
17064 | pivots_ = at::functionalization::impl::from_functional_tensor(pivots); |
17065 | } else { |
17066 | pivots_ = pivots; |
17067 | } |
17068 | |
17069 | at::Tensor info_; |
17070 | if (at::functionalization::impl::isFunctionalTensor(info)) { |
17071 | at::functionalization::impl::sync(info); |
17072 | info_ = at::functionalization::impl::from_functional_tensor(info); |
17073 | } else { |
17074 | info_ = info; |
17075 | } |
17076 | if (!(true && at::functionalization::impl::isFunctionalTensor(LU) && at::functionalization::impl::isFunctionalTensor(pivots) && at::functionalization::impl::isFunctionalTensor(info))) { |
17077 | if ((false || at::functionalization::impl::isFunctionalTensor(A))) { |
17078 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17079 | TORCH_INTERNAL_ASSERT(false, |
17080 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17081 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17082 | } else { |
17083 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17084 | at::AutoDispatchSkipFunctionalize guard; |
17085 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::linalg_lu_factor_ex_out::call(A_, pivot, check_errors, LU_, pivots_, info_); |
17086 |       return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(LU, pivots, info); |
17087 | } |
17088 | } else { |
17089 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output; |
17090 | { |
17091 | at::AutoDispatchSkipFunctionalize guard; |
17092 | tmp_output = at::_ops::linalg_lu_factor_ex::call(A_, pivot, check_errors); |
17093 | } |
17094 | at::functionalization::impl::replace_(LU, std::get<0>(tmp_output)); |
17095 | at::functionalization::impl::commit_update(LU); |
17096 | at::functionalization::impl::sync(LU); |
17097 | at::functionalization::impl::replace_(pivots, std::get<1>(tmp_output)); |
17098 | at::functionalization::impl::commit_update(pivots); |
17099 | at::functionalization::impl::sync(pivots); |
17100 | at::functionalization::impl::replace_(info, std::get<2>(tmp_output)); |
17101 | at::functionalization::impl::commit_update(info); |
17102 | at::functionalization::impl::sync(info); |
17103 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(LU, pivots, info); |
17104 | } |
17105 | } |
17106 | |
17107 | ::std::tuple<at::Tensor &,at::Tensor &> linalg_ldl_factor_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool hermitian, at::Tensor & LD, at::Tensor & pivots) { |
17108 | if (false) { |
17109 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17110 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17111 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17112 | auto self_meta = to_meta(self); |
17113 | auto LD_meta = to_meta(LD); |
17114 | auto pivots_meta = to_meta(pivots); |
17115 | at::AutoDispatchSkipFunctionalize func_guard; |
17116 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17117 | at::_ops::linalg_ldl_factor_out::call(self_meta, hermitian, LD_meta, pivots_meta); |
17118 | } |
17119 | |
17120 | at::Tensor self_; |
17121 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17122 | at::functionalization::impl::sync(self); |
17123 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17124 | } else { |
17125 | self_ = self; |
17126 | } |
17127 | |
17128 | at::Tensor LD_; |
17129 | if (at::functionalization::impl::isFunctionalTensor(LD)) { |
17130 | at::functionalization::impl::sync(LD); |
17131 | LD_ = at::functionalization::impl::from_functional_tensor(LD); |
17132 | } else { |
17133 | LD_ = LD; |
17134 | } |
17135 | |
17136 | at::Tensor pivots_; |
17137 | if (at::functionalization::impl::isFunctionalTensor(pivots)) { |
17138 | at::functionalization::impl::sync(pivots); |
17139 | pivots_ = at::functionalization::impl::from_functional_tensor(pivots); |
17140 | } else { |
17141 | pivots_ = pivots; |
17142 | } |
17143 | if (!(true && at::functionalization::impl::isFunctionalTensor(LD) && at::functionalization::impl::isFunctionalTensor(pivots))) { |
17144 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
17145 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17146 | TORCH_INTERNAL_ASSERT(false, |
17147 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17148 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17149 | } else { |
17150 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17151 | at::AutoDispatchSkipFunctionalize guard; |
17152 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::linalg_ldl_factor_out::call(self_, hermitian, LD_, pivots_); |
17153 |       return ::std::tuple<at::Tensor &,at::Tensor &>(LD, pivots); |
17154 | } |
17155 | } else { |
17156 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
17157 | { |
17158 | at::AutoDispatchSkipFunctionalize guard; |
17159 | tmp_output = at::_ops::linalg_ldl_factor::call(self_, hermitian); |
17160 | } |
17161 | at::functionalization::impl::replace_(LD, std::get<0>(tmp_output)); |
17162 | at::functionalization::impl::commit_update(LD); |
17163 | at::functionalization::impl::sync(LD); |
17164 | at::functionalization::impl::replace_(pivots, std::get<1>(tmp_output)); |
17165 | at::functionalization::impl::commit_update(pivots); |
17166 | at::functionalization::impl::sync(pivots); |
17167 | return ::std::tuple<at::Tensor &,at::Tensor &>(LD, pivots); |
17168 | } |
17169 | } |
17170 | |
17171 | at::Tensor & linalg_ldl_solve_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out) { |
17172 | if (false) { |
17173 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17174 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17175 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17176 | auto LD_meta = to_meta(LD); |
17177 | auto pivots_meta = to_meta(pivots); |
17178 | auto B_meta = to_meta(B); |
17179 | auto out_meta = to_meta(out); |
17180 | at::AutoDispatchSkipFunctionalize func_guard; |
17181 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17182 | at::_ops::linalg_ldl_solve_out::call(LD_meta, pivots_meta, B_meta, hermitian, out_meta); |
17183 | } |
17184 | |
17185 | at::Tensor LD_; |
17186 | if (at::functionalization::impl::isFunctionalTensor(LD)) { |
17187 | at::functionalization::impl::sync(LD); |
17188 | LD_ = at::functionalization::impl::from_functional_tensor(LD); |
17189 | } else { |
17190 | LD_ = LD; |
17191 | } |
17192 | |
17193 | at::Tensor pivots_; |
17194 | if (at::functionalization::impl::isFunctionalTensor(pivots)) { |
17195 | at::functionalization::impl::sync(pivots); |
17196 | pivots_ = at::functionalization::impl::from_functional_tensor(pivots); |
17197 | } else { |
17198 | pivots_ = pivots; |
17199 | } |
17200 | |
17201 | at::Tensor B_; |
17202 | if (at::functionalization::impl::isFunctionalTensor(B)) { |
17203 | at::functionalization::impl::sync(B); |
17204 | B_ = at::functionalization::impl::from_functional_tensor(B); |
17205 | } else { |
17206 | B_ = B; |
17207 | } |
17208 | |
17209 | at::Tensor out_; |
17210 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
17211 | at::functionalization::impl::sync(out); |
17212 | out_ = at::functionalization::impl::from_functional_tensor(out); |
17213 | } else { |
17214 | out_ = out; |
17215 | } |
17216 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
17217 | if ((false || at::functionalization::impl::isFunctionalTensor(LD) || at::functionalization::impl::isFunctionalTensor(pivots) || at::functionalization::impl::isFunctionalTensor(B))) { |
17218 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17219 | TORCH_INTERNAL_ASSERT(false, |
17220 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17221 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17222 | } else { |
17223 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17224 | at::AutoDispatchSkipFunctionalize guard; |
17225 | at::Tensor tmp_output = at::_ops::linalg_ldl_solve_out::call(LD_, pivots_, B_, hermitian, out_); |
17226 |       return out; |
17227 | } |
17228 | } else { |
17229 | at::Tensor tmp_output; |
17230 | { |
17231 | at::AutoDispatchSkipFunctionalize guard; |
17232 | tmp_output = at::_ops::linalg_ldl_solve::call(LD_, pivots_, B_, hermitian); |
17233 | } |
17234 | at::functionalization::impl::replace_(out, tmp_output); |
17235 | at::functionalization::impl::commit_update(out); |
17236 | at::functionalization::impl::sync(out); |
17237 | return out; |
17238 | } |
17239 | } |
17240 | |
17241 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_slogdet_out_sign(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet, at::Tensor & LU, at::Tensor & pivots) { |
17242 | if (false) { |
17243 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17244 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17245 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17246 | auto A_meta = to_meta(A); |
17247 | auto sign_meta = to_meta(sign); |
17248 | auto logabsdet_meta = to_meta(logabsdet); |
17249 | auto LU_meta = to_meta(LU); |
17250 | auto pivots_meta = to_meta(pivots); |
17251 | at::AutoDispatchSkipFunctionalize func_guard; |
17252 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17253 | at::_ops::_linalg_slogdet_sign::call(A_meta, sign_meta, logabsdet_meta, LU_meta, pivots_meta); |
17254 | } |
17255 | |
17256 | at::Tensor A_; |
17257 | if (at::functionalization::impl::isFunctionalTensor(A)) { |
17258 | at::functionalization::impl::sync(A); |
17259 | A_ = at::functionalization::impl::from_functional_tensor(A); |
17260 | } else { |
17261 | A_ = A; |
17262 | } |
17263 | |
17264 | at::Tensor sign_; |
17265 | if (at::functionalization::impl::isFunctionalTensor(sign)) { |
17266 | at::functionalization::impl::sync(sign); |
17267 | sign_ = at::functionalization::impl::from_functional_tensor(sign); |
17268 | } else { |
17269 | sign_ = sign; |
17270 | } |
17271 | |
17272 | at::Tensor logabsdet_; |
17273 | if (at::functionalization::impl::isFunctionalTensor(logabsdet)) { |
17274 | at::functionalization::impl::sync(logabsdet); |
17275 | logabsdet_ = at::functionalization::impl::from_functional_tensor(logabsdet); |
17276 | } else { |
17277 | logabsdet_ = logabsdet; |
17278 | } |
17279 | |
17280 | at::Tensor LU_; |
17281 | if (at::functionalization::impl::isFunctionalTensor(LU)) { |
17282 | at::functionalization::impl::sync(LU); |
17283 | LU_ = at::functionalization::impl::from_functional_tensor(LU); |
17284 | } else { |
17285 | LU_ = LU; |
17286 | } |
17287 | |
17288 | at::Tensor pivots_; |
17289 | if (at::functionalization::impl::isFunctionalTensor(pivots)) { |
17290 | at::functionalization::impl::sync(pivots); |
17291 | pivots_ = at::functionalization::impl::from_functional_tensor(pivots); |
17292 | } else { |
17293 | pivots_ = pivots; |
17294 | } |
17295 | if (!(true && at::functionalization::impl::isFunctionalTensor(sign) && at::functionalization::impl::isFunctionalTensor(logabsdet) && at::functionalization::impl::isFunctionalTensor(LU) && at::functionalization::impl::isFunctionalTensor(pivots))) { |
17296 | if ((false || at::functionalization::impl::isFunctionalTensor(A))) { |
17297 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17298 | TORCH_INTERNAL_ASSERT(false, |
17299 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17300 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17301 | } else { |
17302 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17303 | at::AutoDispatchSkipFunctionalize guard; |
17304 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_linalg_slogdet_sign::call(A_, sign_, logabsdet_, LU_, pivots_); |
17305 |       return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(sign, logabsdet, LU, pivots); |
17306 | } |
17307 | } else { |
17308 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output; |
17309 | { |
17310 | at::AutoDispatchSkipFunctionalize guard; |
17311 | tmp_output = at::_ops::_linalg_slogdet::call(A_); |
17312 | } |
17313 | at::functionalization::impl::replace_(sign, std::get<0>(tmp_output)); |
17314 | at::functionalization::impl::commit_update(sign); |
17315 | at::functionalization::impl::sync(sign); |
17316 | at::functionalization::impl::replace_(logabsdet, std::get<1>(tmp_output)); |
17317 | at::functionalization::impl::commit_update(logabsdet); |
17318 | at::functionalization::impl::sync(logabsdet); |
17319 | at::functionalization::impl::replace_(LU, std::get<2>(tmp_output)); |
17320 | at::functionalization::impl::commit_update(LU); |
17321 | at::functionalization::impl::sync(LU); |
17322 | at::functionalization::impl::replace_(pivots, std::get<3>(tmp_output)); |
17323 | at::functionalization::impl::commit_update(pivots); |
17324 | at::functionalization::impl::sync(pivots); |
17325 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(sign, logabsdet, LU, pivots); |
17326 | } |
17327 | } |
17328 | |
17329 | at::Tensor & linalg_eigvals_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
17330 | if (false) { |
17331 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17332 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17333 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17334 | auto self_meta = to_meta(self); |
17335 | auto out_meta = to_meta(out); |
17336 | at::AutoDispatchSkipFunctionalize func_guard; |
17337 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17338 | at::_ops::linalg_eigvals_out::call(self_meta, out_meta); |
17339 | } |
17340 | |
17341 | at::Tensor self_; |
17342 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17343 | at::functionalization::impl::sync(self); |
17344 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17345 | } else { |
17346 | self_ = self; |
17347 | } |
17348 | |
17349 | at::Tensor out_; |
17350 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
17351 | at::functionalization::impl::sync(out); |
17352 | out_ = at::functionalization::impl::from_functional_tensor(out); |
17353 | } else { |
17354 | out_ = out; |
17355 | } |
17356 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
17357 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
17358 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17359 | TORCH_INTERNAL_ASSERT(false, |
17360 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17361 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17362 | } else { |
17363 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17364 | at::AutoDispatchSkipFunctionalize guard; |
17365 | at::Tensor tmp_output = at::_ops::linalg_eigvals_out::call(self_, out_); |
17366 |       return out; |
17367 | } |
17368 | } else { |
17369 | at::Tensor tmp_output; |
17370 | { |
17371 | at::AutoDispatchSkipFunctionalize guard; |
17372 | tmp_output = at::_ops::linalg_eigvals::call(self_); |
17373 | } |
17374 | at::functionalization::impl::replace_(out, tmp_output); |
17375 | at::functionalization::impl::commit_update(out); |
17376 | at::functionalization::impl::sync(out); |
17377 | return out; |
17378 | } |
17379 | } |
17380 | |
17381 | ::std::tuple<at::Tensor &,at::Tensor &> linalg_eigh_out_eigvals(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view UPLO, at::Tensor & eigvals, at::Tensor & eigvecs) { |
17382 | if (false) { |
17383 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17384 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17385 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17386 | auto self_meta = to_meta(self); |
17387 | auto eigvals_meta = to_meta(eigvals); |
17388 | auto eigvecs_meta = to_meta(eigvecs); |
17389 | at::AutoDispatchSkipFunctionalize func_guard; |
17390 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17391 | at::_ops::linalg_eigh_eigvals::call(self_meta, UPLO, eigvals_meta, eigvecs_meta); |
17392 | } |
17393 | |
17394 | at::Tensor self_; |
17395 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17396 | at::functionalization::impl::sync(self); |
17397 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17398 | } else { |
17399 | self_ = self; |
17400 | } |
17401 | |
17402 | at::Tensor eigvals_; |
17403 | if (at::functionalization::impl::isFunctionalTensor(eigvals)) { |
17404 | at::functionalization::impl::sync(eigvals); |
17405 | eigvals_ = at::functionalization::impl::from_functional_tensor(eigvals); |
17406 | } else { |
17407 | eigvals_ = eigvals; |
17408 | } |
17409 | |
17410 | at::Tensor eigvecs_; |
17411 | if (at::functionalization::impl::isFunctionalTensor(eigvecs)) { |
17412 | at::functionalization::impl::sync(eigvecs); |
17413 | eigvecs_ = at::functionalization::impl::from_functional_tensor(eigvecs); |
17414 | } else { |
17415 | eigvecs_ = eigvecs; |
17416 | } |
17417 | if (!(true && at::functionalization::impl::isFunctionalTensor(eigvals) && at::functionalization::impl::isFunctionalTensor(eigvecs))) { |
17418 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
17419 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17420 | TORCH_INTERNAL_ASSERT(false, |
17421 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17422 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17423 | } else { |
17424 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17425 | at::AutoDispatchSkipFunctionalize guard; |
17426 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::linalg_eigh_eigvals::call(self_, UPLO, eigvals_, eigvecs_); |
17427 |       return ::std::tuple<at::Tensor &,at::Tensor &>(eigvals, eigvecs); |
17428 | } |
17429 | } else { |
17430 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
17431 | { |
17432 | at::AutoDispatchSkipFunctionalize guard; |
17433 | tmp_output = at::_ops::linalg_eigh::call(self_, UPLO); |
17434 | } |
17435 | at::functionalization::impl::replace_(eigvals, std::get<0>(tmp_output)); |
17436 | at::functionalization::impl::commit_update(eigvals); |
17437 | at::functionalization::impl::sync(eigvals); |
17438 | at::functionalization::impl::replace_(eigvecs, std::get<1>(tmp_output)); |
17439 | at::functionalization::impl::commit_update(eigvecs); |
17440 | at::functionalization::impl::sync(eigvecs); |
17441 | return ::std::tuple<at::Tensor &,at::Tensor &>(eigvals, eigvecs); |
17442 | } |
17443 | } |
17444 | |
17445 | at::Tensor & linalg_householder_product_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & tau, at::Tensor & out) { |
17446 | if (false) { |
17447 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17448 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17449 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17450 | auto input_meta = to_meta(input); |
17451 | auto tau_meta = to_meta(tau); |
17452 | auto out_meta = to_meta(out); |
17453 | at::AutoDispatchSkipFunctionalize func_guard; |
17454 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17455 | at::_ops::linalg_householder_product_out::call(input_meta, tau_meta, out_meta); |
17456 | } |
17457 | |
17458 | at::Tensor input_; |
17459 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
17460 | at::functionalization::impl::sync(input); |
17461 | input_ = at::functionalization::impl::from_functional_tensor(input); |
17462 | } else { |
17463 | input_ = input; |
17464 | } |
17465 | |
17466 | at::Tensor tau_; |
17467 | if (at::functionalization::impl::isFunctionalTensor(tau)) { |
17468 | at::functionalization::impl::sync(tau); |
17469 | tau_ = at::functionalization::impl::from_functional_tensor(tau); |
17470 | } else { |
17471 | tau_ = tau; |
17472 | } |
17473 | |
17474 | at::Tensor out_; |
17475 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
17476 | at::functionalization::impl::sync(out); |
17477 | out_ = at::functionalization::impl::from_functional_tensor(out); |
17478 | } else { |
17479 | out_ = out; |
17480 | } |
17481 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
17482 | if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(tau))) { |
17483 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17484 | TORCH_INTERNAL_ASSERT(false, |
17485 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17486 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17487 | } else { |
17488 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17489 | at::AutoDispatchSkipFunctionalize guard; |
17490 | at::Tensor tmp_output = at::_ops::linalg_householder_product_out::call(input_, tau_, out_); |
17491 |       return out; |
17492 | } |
17493 | } else { |
17494 | at::Tensor tmp_output; |
17495 | { |
17496 | at::AutoDispatchSkipFunctionalize guard; |
17497 | tmp_output = at::_ops::linalg_householder_product::call(input_, tau_); |
17498 | } |
17499 | at::functionalization::impl::replace_(out, tmp_output); |
17500 | at::functionalization::impl::commit_update(out); |
17501 | at::functionalization::impl::sync(out); |
17502 | return out; |
17503 | } |
17504 | } |
17505 | |
17506 | at::Tensor & ger_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out) { |
17507 | if (false) { |
17508 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17509 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17510 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17511 | auto self_meta = to_meta(self); |
17512 | auto vec2_meta = to_meta(vec2); |
17513 | auto out_meta = to_meta(out); |
17514 | at::AutoDispatchSkipFunctionalize func_guard; |
17515 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17516 | at::_ops::ger_out::call(self_meta, vec2_meta, out_meta); |
17517 | } |
17518 | |
17519 | at::Tensor self_; |
17520 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17521 | at::functionalization::impl::sync(self); |
17522 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17523 | } else { |
17524 | self_ = self; |
17525 | } |
17526 | |
17527 | at::Tensor vec2_; |
17528 | if (at::functionalization::impl::isFunctionalTensor(vec2)) { |
17529 | at::functionalization::impl::sync(vec2); |
17530 | vec2_ = at::functionalization::impl::from_functional_tensor(vec2); |
17531 | } else { |
17532 | vec2_ = vec2; |
17533 | } |
17534 | |
17535 | at::Tensor out_; |
17536 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
17537 | at::functionalization::impl::sync(out); |
17538 | out_ = at::functionalization::impl::from_functional_tensor(out); |
17539 | } else { |
17540 | out_ = out; |
17541 | } |
17542 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
17543 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(vec2))) { |
17544 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17545 | TORCH_INTERNAL_ASSERT(false, |
17546 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17547 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17548 | } else { |
17549 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17550 | at::AutoDispatchSkipFunctionalize guard; |
17551 | at::Tensor tmp_output = at::_ops::ger_out::call(self_, vec2_, out_); |
17552 |       return out; |
17553 | } |
17554 | } else { |
17555 | at::Tensor tmp_output; |
17556 | { |
17557 | at::AutoDispatchSkipFunctionalize guard; |
17558 | tmp_output = at::_ops::ger::call(self_, vec2_); |
17559 | } |
17560 | at::functionalization::impl::replace_(out, tmp_output); |
17561 | at::functionalization::impl::commit_update(out); |
17562 | at::functionalization::impl::sync(out); |
17563 | return out; |
17564 | } |
17565 | } |
17566 | |
17567 | at::Tensor & linalg_norm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) { |
17568 | if (false) { |
17569 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17570 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17571 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17572 | auto self_meta = to_meta(self); |
17573 | auto out_meta = to_meta(out); |
17574 | at::AutoDispatchSkipFunctionalize func_guard; |
17575 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17576 | at::_ops::linalg_norm_out::call(self_meta, ord, dim, keepdim, dtype, out_meta); |
17577 | } |
17578 | |
17579 | at::Tensor self_; |
17580 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17581 | at::functionalization::impl::sync(self); |
17582 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17583 | } else { |
17584 | self_ = self; |
17585 | } |
17586 | |
17587 | at::Tensor out_; |
17588 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
17589 | at::functionalization::impl::sync(out); |
17590 | out_ = at::functionalization::impl::from_functional_tensor(out); |
17591 | } else { |
17592 | out_ = out; |
17593 | } |
17594 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
17595 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
17596 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17597 | TORCH_INTERNAL_ASSERT(false, |
17598 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17599 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17600 | } else { |
17601 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17602 | at::AutoDispatchSkipFunctionalize guard; |
17603 | at::Tensor tmp_output = at::_ops::linalg_norm_out::call(self_, ord, dim, keepdim, dtype, out_); |
17604 |       return out; |
17605 | } |
17606 | } else { |
17607 | at::Tensor tmp_output; |
17608 | { |
17609 | at::AutoDispatchSkipFunctionalize guard; |
17610 | tmp_output = at::_ops::linalg_norm::call(self_, ord, dim, keepdim, dtype); |
17611 | } |
17612 | at::functionalization::impl::replace_(out, tmp_output); |
17613 | at::functionalization::impl::commit_update(out); |
17614 | at::functionalization::impl::sync(out); |
17615 | return out; |
17616 | } |
17617 | } |
17618 | |
17619 | at::Tensor & linalg_norm_out_ord_str_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) { |
17620 | if (false) { |
17621 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17622 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17623 |     // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17624 | auto self_meta = to_meta(self); |
17625 | auto out_meta = to_meta(out); |
17626 | at::AutoDispatchSkipFunctionalize func_guard; |
17627 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17628 | at::_ops::linalg_norm_ord_str_out::call(self_meta, ord, dim, keepdim, dtype, out_meta); |
17629 | } |
17630 | |
17631 | at::Tensor self_; |
17632 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17633 | at::functionalization::impl::sync(self); |
17634 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17635 | } else { |
17636 | self_ = self; |
17637 | } |
17638 | |
17639 | at::Tensor out_; |
17640 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
17641 | at::functionalization::impl::sync(out); |
17642 | out_ = at::functionalization::impl::from_functional_tensor(out); |
17643 | } else { |
17644 | out_ = out; |
17645 | } |
17646 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
17647 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
17648 |       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17649 | TORCH_INTERNAL_ASSERT(false, |
17650 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17651 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17652 | } else { |
17653 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17654 | at::AutoDispatchSkipFunctionalize guard; |
17655 | at::Tensor tmp_output = at::_ops::linalg_norm_ord_str_out::call(self_, ord, dim, keepdim, dtype, out_); |
17656 |       return out; |
17657 | } |
17658 | } else { |
17659 | at::Tensor tmp_output; |
17660 | { |
17661 | at::AutoDispatchSkipFunctionalize guard; |
17662 | tmp_output = at::_ops::linalg_norm_ord_str::call(self_, ord, dim, keepdim, dtype); |
17663 | } |
17664 | at::functionalization::impl::replace_(out, tmp_output); |
17665 | at::functionalization::impl::commit_update(out); |
17666 | at::functionalization::impl::sync(out); |
17667 | return out; |
17668 | } |
17669 | } |
17670 | |
17671 | at::Tensor & linalg_vector_norm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) { |
17672 | if (false) { |
17673 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17674 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
17676 | auto self_meta = to_meta(self); |
17677 | auto out_meta = to_meta(out); |
17678 | at::AutoDispatchSkipFunctionalize func_guard; |
17679 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17680 | at::_ops::linalg_vector_norm_out::call(self_meta, ord, dim, keepdim, dtype, out_meta); |
17681 | } |
17682 | |
17683 | at::Tensor self_; |
17684 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17685 | at::functionalization::impl::sync(self); |
17686 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17687 | } else { |
17688 | self_ = self; |
17689 | } |
17690 | |
17691 | at::Tensor out_; |
17692 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
17693 | at::functionalization::impl::sync(out); |
17694 | out_ = at::functionalization::impl::from_functional_tensor(out); |
17695 | } else { |
17696 | out_ = out; |
17697 | } |
17698 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
17699 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
17701 | TORCH_INTERNAL_ASSERT(false, |
17702 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17703 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17704 | } else { |
17705 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17706 | at::AutoDispatchSkipFunctionalize guard; |
17707 | at::Tensor tmp_output = at::_ops::linalg_vector_norm_out::call(self_, ord, dim, keepdim, dtype, out_); |
      return out;
17709 | } |
17710 | } else { |
17711 | at::Tensor tmp_output; |
17712 | { |
17713 | at::AutoDispatchSkipFunctionalize guard; |
17714 | tmp_output = at::_ops::linalg_vector_norm::call(self_, ord, dim, keepdim, dtype); |
17715 | } |
17716 | at::functionalization::impl::replace_(out, tmp_output); |
17717 | at::functionalization::impl::commit_update(out); |
17718 | at::functionalization::impl::sync(out); |
17719 | return out; |
17720 | } |
17721 | } |
17722 | |
17723 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_solve_ex_out_result(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info) { |
17724 | if (false) { |
17725 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17726 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
17728 | auto A_meta = to_meta(A); |
17729 | auto B_meta = to_meta(B); |
17730 | auto result_meta = to_meta(result); |
17731 | auto LU_meta = to_meta(LU); |
17732 | auto pivots_meta = to_meta(pivots); |
17733 | auto info_meta = to_meta(info); |
17734 | at::AutoDispatchSkipFunctionalize func_guard; |
17735 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17736 | at::_ops::_linalg_solve_ex_result::call(A_meta, B_meta, left, check_errors, result_meta, LU_meta, pivots_meta, info_meta); |
17737 | } |
17738 | |
17739 | at::Tensor A_; |
17740 | if (at::functionalization::impl::isFunctionalTensor(A)) { |
17741 | at::functionalization::impl::sync(A); |
17742 | A_ = at::functionalization::impl::from_functional_tensor(A); |
17743 | } else { |
17744 | A_ = A; |
17745 | } |
17746 | |
17747 | at::Tensor B_; |
17748 | if (at::functionalization::impl::isFunctionalTensor(B)) { |
17749 | at::functionalization::impl::sync(B); |
17750 | B_ = at::functionalization::impl::from_functional_tensor(B); |
17751 | } else { |
17752 | B_ = B; |
17753 | } |
17754 | |
17755 | at::Tensor result_; |
17756 | if (at::functionalization::impl::isFunctionalTensor(result)) { |
17757 | at::functionalization::impl::sync(result); |
17758 | result_ = at::functionalization::impl::from_functional_tensor(result); |
17759 | } else { |
17760 | result_ = result; |
17761 | } |
17762 | |
17763 | at::Tensor LU_; |
17764 | if (at::functionalization::impl::isFunctionalTensor(LU)) { |
17765 | at::functionalization::impl::sync(LU); |
17766 | LU_ = at::functionalization::impl::from_functional_tensor(LU); |
17767 | } else { |
17768 | LU_ = LU; |
17769 | } |
17770 | |
17771 | at::Tensor pivots_; |
17772 | if (at::functionalization::impl::isFunctionalTensor(pivots)) { |
17773 | at::functionalization::impl::sync(pivots); |
17774 | pivots_ = at::functionalization::impl::from_functional_tensor(pivots); |
17775 | } else { |
17776 | pivots_ = pivots; |
17777 | } |
17778 | |
17779 | at::Tensor info_; |
17780 | if (at::functionalization::impl::isFunctionalTensor(info)) { |
17781 | at::functionalization::impl::sync(info); |
17782 | info_ = at::functionalization::impl::from_functional_tensor(info); |
17783 | } else { |
17784 | info_ = info; |
17785 | } |
17786 | if (!(true && at::functionalization::impl::isFunctionalTensor(result) && at::functionalization::impl::isFunctionalTensor(LU) && at::functionalization::impl::isFunctionalTensor(pivots) && at::functionalization::impl::isFunctionalTensor(info))) { |
17787 | if ((false || at::functionalization::impl::isFunctionalTensor(A) || at::functionalization::impl::isFunctionalTensor(B))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
17789 | TORCH_INTERNAL_ASSERT(false, |
17790 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17791 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17792 | } else { |
17793 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17794 | at::AutoDispatchSkipFunctionalize guard; |
17795 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_linalg_solve_ex_result::call(A_, B_, left, check_errors, result_, LU_, pivots_, info_); |
      return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(result, LU, pivots, info);
17797 | } |
17798 | } else { |
17799 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output; |
17800 | { |
17801 | at::AutoDispatchSkipFunctionalize guard; |
17802 | tmp_output = at::_ops::_linalg_solve_ex::call(A_, B_, left, check_errors); |
17803 | } |
17804 | at::functionalization::impl::replace_(result, std::get<0>(tmp_output)); |
17805 | at::functionalization::impl::commit_update(result); |
17806 | at::functionalization::impl::sync(result); |
17807 | at::functionalization::impl::replace_(LU, std::get<1>(tmp_output)); |
17808 | at::functionalization::impl::commit_update(LU); |
17809 | at::functionalization::impl::sync(LU); |
17810 | at::functionalization::impl::replace_(pivots, std::get<2>(tmp_output)); |
17811 | at::functionalization::impl::commit_update(pivots); |
17812 | at::functionalization::impl::sync(pivots); |
17813 | at::functionalization::impl::replace_(info, std::get<3>(tmp_output)); |
17814 | at::functionalization::impl::commit_update(info); |
17815 | at::functionalization::impl::sync(info); |
17816 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(result, LU, pivots, info); |
17817 | } |
17818 | } |
17819 | |
17820 | at::Tensor & linalg_solve_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left, at::Tensor & out) { |
17821 | if (false) { |
17822 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17823 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
17825 | auto A_meta = to_meta(A); |
17826 | auto B_meta = to_meta(B); |
17827 | auto out_meta = to_meta(out); |
17828 | at::AutoDispatchSkipFunctionalize func_guard; |
17829 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17830 | at::_ops::linalg_solve_out::call(A_meta, B_meta, left, out_meta); |
17831 | } |
17832 | |
17833 | at::Tensor A_; |
17834 | if (at::functionalization::impl::isFunctionalTensor(A)) { |
17835 | at::functionalization::impl::sync(A); |
17836 | A_ = at::functionalization::impl::from_functional_tensor(A); |
17837 | } else { |
17838 | A_ = A; |
17839 | } |
17840 | |
17841 | at::Tensor B_; |
17842 | if (at::functionalization::impl::isFunctionalTensor(B)) { |
17843 | at::functionalization::impl::sync(B); |
17844 | B_ = at::functionalization::impl::from_functional_tensor(B); |
17845 | } else { |
17846 | B_ = B; |
17847 | } |
17848 | |
17849 | at::Tensor out_; |
17850 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
17851 | at::functionalization::impl::sync(out); |
17852 | out_ = at::functionalization::impl::from_functional_tensor(out); |
17853 | } else { |
17854 | out_ = out; |
17855 | } |
17856 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
17857 | if ((false || at::functionalization::impl::isFunctionalTensor(A) || at::functionalization::impl::isFunctionalTensor(B))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
17859 | TORCH_INTERNAL_ASSERT(false, |
17860 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17861 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17862 | } else { |
17863 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17864 | at::AutoDispatchSkipFunctionalize guard; |
17865 | at::Tensor tmp_output = at::_ops::linalg_solve_out::call(A_, B_, left, out_); |
      return out;
17867 | } |
17868 | } else { |
17869 | at::Tensor tmp_output; |
17870 | { |
17871 | at::AutoDispatchSkipFunctionalize guard; |
17872 | tmp_output = at::_ops::linalg_solve::call(A_, B_, left); |
17873 | } |
17874 | at::functionalization::impl::replace_(out, tmp_output); |
17875 | at::functionalization::impl::commit_update(out); |
17876 | at::functionalization::impl::sync(out); |
17877 | return out; |
17878 | } |
17879 | } |
17880 | |
17881 | at::Tensor & linalg_multi_dot_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) { |
17882 | if (false) { |
17883 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17884 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
17886 | auto tensors_meta = to_meta(tensors); |
17887 | auto out_meta = to_meta(out); |
17888 | at::AutoDispatchSkipFunctionalize func_guard; |
17889 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17890 | at::_ops::linalg_multi_dot_out::call(tensors_meta, out_meta); |
17891 | } |
17892 | |
17893 | ::std::vector<at::Tensor> tensors_; |
17894 | if (at::functionalization::impl::isFunctionalTensor(tensors)) { |
17895 | at::functionalization::impl::sync(tensors); |
17896 | tensors_ = at::functionalization::impl::from_functional_tensor(tensors); |
17897 | } else { |
17898 | tensors_ = tensors.vec(); |
17899 | } |
17900 | |
17901 | at::Tensor out_; |
17902 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
17903 | at::functionalization::impl::sync(out); |
17904 | out_ = at::functionalization::impl::from_functional_tensor(out); |
17905 | } else { |
17906 | out_ = out; |
17907 | } |
17908 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
17909 | if ((false || at::functionalization::impl::isFunctionalTensor(tensors))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
17911 | TORCH_INTERNAL_ASSERT(false, |
17912 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17913 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17914 | } else { |
17915 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17916 | at::AutoDispatchSkipFunctionalize guard; |
17917 | at::Tensor tmp_output = at::_ops::linalg_multi_dot_out::call(tensors_, out_); |
      return out;
17919 | } |
17920 | } else { |
17921 | at::Tensor tmp_output; |
17922 | { |
17923 | at::AutoDispatchSkipFunctionalize guard; |
17924 | tmp_output = at::_ops::linalg_multi_dot::call(tensors_); |
17925 | } |
17926 | at::functionalization::impl::replace_(out, tmp_output); |
17927 | at::functionalization::impl::commit_update(out); |
17928 | at::functionalization::impl::sync(out); |
17929 | return out; |
17930 | } |
17931 | } |
17932 | |
17933 | at::Tensor & _test_optional_filled_intlist_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, at::OptionalIntArrayRef addends, at::Tensor & out) { |
17934 | if (false) { |
17935 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17936 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
17938 | auto values_meta = to_meta(values); |
17939 | auto out_meta = to_meta(out); |
17940 | at::AutoDispatchSkipFunctionalize func_guard; |
17941 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17942 | at::_ops::_test_optional_filled_intlist_out::call(values_meta, addends, out_meta); |
17943 | } |
17944 | |
17945 | at::Tensor values_; |
17946 | if (at::functionalization::impl::isFunctionalTensor(values)) { |
17947 | at::functionalization::impl::sync(values); |
17948 | values_ = at::functionalization::impl::from_functional_tensor(values); |
17949 | } else { |
17950 | values_ = values; |
17951 | } |
17952 | |
17953 | at::Tensor out_; |
17954 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
17955 | at::functionalization::impl::sync(out); |
17956 | out_ = at::functionalization::impl::from_functional_tensor(out); |
17957 | } else { |
17958 | out_ = out; |
17959 | } |
17960 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
17961 | if ((false || at::functionalization::impl::isFunctionalTensor(values))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
17963 | TORCH_INTERNAL_ASSERT(false, |
17964 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17965 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17966 | } else { |
17967 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17968 | at::AutoDispatchSkipFunctionalize guard; |
17969 | at::Tensor tmp_output = at::_ops::_test_optional_filled_intlist_out::call(values_, addends, out_); |
      return out;
17971 | } |
17972 | } else { |
17973 | at::Tensor tmp_output; |
17974 | { |
17975 | at::AutoDispatchSkipFunctionalize guard; |
17976 | tmp_output = at::_ops::_test_optional_filled_intlist::call(values_, addends); |
17977 | } |
17978 | at::functionalization::impl::replace_(out, tmp_output); |
17979 | at::functionalization::impl::commit_update(out); |
17980 | at::functionalization::impl::sync(out); |
17981 | return out; |
17982 | } |
17983 | } |
17984 | |
17985 | at::Tensor & _test_autograd_multiple_dispatch_out_fullcoverage_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
17986 | if (false) { |
17987 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17988 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
17990 | auto self_meta = to_meta(self); |
17991 | auto out_meta = to_meta(out); |
17992 | at::AutoDispatchSkipFunctionalize func_guard; |
17993 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17994 | at::_ops::_test_autograd_multiple_dispatch_fullcoverage_out::call(self_meta, out_meta); |
17995 | } |
17996 | |
17997 | at::Tensor self_; |
17998 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17999 | at::functionalization::impl::sync(self); |
18000 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18001 | } else { |
18002 | self_ = self; |
18003 | } |
18004 | |
18005 | at::Tensor out_; |
18006 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18007 | at::functionalization::impl::sync(out); |
18008 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18009 | } else { |
18010 | out_ = out; |
18011 | } |
18012 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18013 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18015 | TORCH_INTERNAL_ASSERT(false, |
18016 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18017 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18018 | } else { |
18019 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18020 | at::AutoDispatchSkipFunctionalize guard; |
18021 | at::Tensor tmp_output = at::_ops::_test_autograd_multiple_dispatch_fullcoverage_out::call(self_, out_); |
      return out;
18023 | } |
18024 | } else { |
18025 | at::Tensor tmp_output; |
18026 | { |
18027 | at::AutoDispatchSkipFunctionalize guard; |
18028 | tmp_output = at::_ops::_test_autograd_multiple_dispatch_fullcoverage::call(self_); |
18029 | } |
18030 | at::functionalization::impl::replace_(out, tmp_output); |
18031 | at::functionalization::impl::commit_update(out); |
18032 | at::functionalization::impl::sync(out); |
18033 | return out; |
18034 | } |
18035 | } |
18036 | |
18037 | at::Tensor & _test_autograd_multiple_dispatch_view_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
18038 | if (false) { |
18039 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18040 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18042 | auto self_meta = to_meta(self); |
18043 | auto out_meta = to_meta(out); |
18044 | at::AutoDispatchSkipFunctionalize func_guard; |
18045 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18046 | at::_ops::_test_autograd_multiple_dispatch_view_copy_out::call(self_meta, out_meta); |
18047 | } |
18048 | |
18049 | at::Tensor self_; |
18050 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18051 | at::functionalization::impl::sync(self); |
18052 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18053 | } else { |
18054 | self_ = self; |
18055 | } |
18056 | |
18057 | at::Tensor out_; |
18058 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18059 | at::functionalization::impl::sync(out); |
18060 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18061 | } else { |
18062 | out_ = out; |
18063 | } |
18064 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18065 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18067 | TORCH_INTERNAL_ASSERT(false, |
18068 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18069 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18070 | } else { |
18071 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18072 | at::AutoDispatchSkipFunctionalize guard; |
18073 | at::Tensor tmp_output = at::_ops::_test_autograd_multiple_dispatch_view_copy_out::call(self_, out_); |
      return out;
18075 | } |
18076 | } else { |
18077 | at::Tensor tmp_output; |
18078 | { |
18079 | at::AutoDispatchSkipFunctionalize guard; |
18080 | tmp_output = at::_ops::_test_autograd_multiple_dispatch_view_copy::call(self_); |
18081 | } |
18082 | at::functionalization::impl::replace_(out, tmp_output); |
18083 | at::functionalization::impl::commit_update(out); |
18084 | at::functionalization::impl::sync(out); |
18085 | return out; |
18086 | } |
18087 | } |
18088 | |
18089 | at::Tensor & segment_reduce_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & indices, const c10::optional<at::Tensor> & offsets, int64_t axis, bool unsafe, const c10::optional<at::Scalar> & initial, at::Tensor & out) { |
18090 | if (false) { |
18091 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18092 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18094 | auto data_meta = to_meta(data); |
18095 | auto lengths_meta = to_meta(lengths); |
18096 | auto indices_meta = to_meta(indices); |
18097 | auto offsets_meta = to_meta(offsets); |
18098 | auto out_meta = to_meta(out); |
18099 | at::AutoDispatchSkipFunctionalize func_guard; |
18100 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18101 | at::_ops::segment_reduce_out::call(data_meta, reduce, lengths_meta, indices_meta, offsets_meta, axis, unsafe, initial, out_meta); |
18102 | } |
18103 | |
18104 | at::Tensor data_; |
18105 | if (at::functionalization::impl::isFunctionalTensor(data)) { |
18106 | at::functionalization::impl::sync(data); |
18107 | data_ = at::functionalization::impl::from_functional_tensor(data); |
18108 | } else { |
18109 | data_ = data; |
18110 | } |
18111 | |
18112 | c10::optional<at::Tensor> lengths_; |
18113 | if (at::functionalization::impl::isFunctionalTensor(lengths)) { |
18114 | at::functionalization::impl::sync(lengths); |
18115 | lengths_ = at::functionalization::impl::from_functional_tensor(lengths); |
18116 | } else { |
18117 | lengths_ = lengths; |
18118 | } |
18119 | |
18120 | c10::optional<at::Tensor> indices_; |
18121 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
18122 | at::functionalization::impl::sync(indices); |
18123 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
18124 | } else { |
18125 | indices_ = indices; |
18126 | } |
18127 | |
18128 | c10::optional<at::Tensor> offsets_; |
18129 | if (at::functionalization::impl::isFunctionalTensor(offsets)) { |
18130 | at::functionalization::impl::sync(offsets); |
18131 | offsets_ = at::functionalization::impl::from_functional_tensor(offsets); |
18132 | } else { |
18133 | offsets_ = offsets; |
18134 | } |
18135 | |
18136 | at::Tensor out_; |
18137 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18138 | at::functionalization::impl::sync(out); |
18139 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18140 | } else { |
18141 | out_ = out; |
18142 | } |
18143 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18144 | if ((false || at::functionalization::impl::isFunctionalTensor(data) || at::functionalization::impl::isFunctionalTensor(lengths) || at::functionalization::impl::isFunctionalTensor(indices) || at::functionalization::impl::isFunctionalTensor(offsets))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18146 | TORCH_INTERNAL_ASSERT(false, |
18147 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18148 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18149 | } else { |
18150 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18151 | at::AutoDispatchSkipFunctionalize guard; |
18152 | at::Tensor tmp_output = at::_ops::segment_reduce_out::call(data_, reduce, lengths_, indices_, offsets_, axis, unsafe, initial, out_); |
      return out;
18154 | } |
18155 | } else { |
18156 | at::Tensor tmp_output; |
18157 | { |
18158 | at::AutoDispatchSkipFunctionalize guard; |
18159 | tmp_output = at::_ops::segment_reduce::call(data_, reduce, lengths_, indices_, offsets_, axis, unsafe, initial); |
18160 | } |
18161 | at::functionalization::impl::replace_(out, tmp_output); |
18162 | at::functionalization::impl::commit_update(out); |
18163 | at::functionalization::impl::sync(out); |
18164 | return out; |
18165 | } |
18166 | } |
18167 | |
18168 | at::Tensor & _nested_tensor_from_tensor_list_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList list, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, at::Tensor & out) { |
18169 | if (false) { |
18170 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18171 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18173 | auto list_meta = to_meta(list); |
18174 | auto out_meta = to_meta(out); |
18175 | at::AutoDispatchSkipFunctionalize func_guard; |
18176 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18177 | at::_ops::_nested_tensor_from_tensor_list_out::call(list_meta, dtype, layout, device, pin_memory, out_meta); |
18178 | } |
18179 | |
18180 | ::std::vector<at::Tensor> list_; |
18181 | if (at::functionalization::impl::isFunctionalTensor(list)) { |
18182 | at::functionalization::impl::sync(list); |
18183 | list_ = at::functionalization::impl::from_functional_tensor(list); |
18184 | } else { |
18185 | list_ = list.vec(); |
18186 | } |
18187 | |
18188 | at::Tensor out_; |
18189 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18190 | at::functionalization::impl::sync(out); |
18191 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18192 | } else { |
18193 | out_ = out; |
18194 | } |
18195 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18196 | if ((false || at::functionalization::impl::isFunctionalTensor(list))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18198 | TORCH_INTERNAL_ASSERT(false, |
18199 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18200 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18201 | } else { |
18202 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18203 | at::AutoDispatchSkipFunctionalize guard; |
18204 | at::Tensor tmp_output = at::_ops::_nested_tensor_from_tensor_list_out::call(list_, dtype, layout, device, pin_memory, out_); |
      return out;
18206 | } |
18207 | } else { |
18208 | at::Tensor tmp_output; |
18209 | { |
18210 | at::AutoDispatchSkipFunctionalize guard; |
18211 | tmp_output = at::_ops::_nested_tensor_from_tensor_list::call(list_, dtype, layout, device, pin_memory); |
18212 | } |
18213 | at::functionalization::impl::replace_(out, tmp_output); |
18214 | at::functionalization::impl::commit_update(out); |
18215 | at::functionalization::impl::sync(out); |
18216 | return out; |
18217 | } |
18218 | } |
18219 | |
18220 | at::Tensor & diagonal_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) { |
18221 | if (false) { |
18222 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18223 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18225 | auto self_meta = to_meta(self); |
18226 | auto out_meta = to_meta(out); |
18227 | at::AutoDispatchSkipFunctionalize func_guard; |
18228 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18229 | at::_ops::diagonal_copy_out::call(self_meta, offset, dim1, dim2, out_meta); |
18230 | } |
18231 | |
18232 | at::Tensor self_; |
18233 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18234 | at::functionalization::impl::sync(self); |
18235 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18236 | } else { |
18237 | self_ = self; |
18238 | } |
18239 | |
18240 | at::Tensor out_; |
18241 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18242 | at::functionalization::impl::sync(out); |
18243 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18244 | } else { |
18245 | out_ = out; |
18246 | } |
18247 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18248 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18250 | TORCH_INTERNAL_ASSERT(false, |
18251 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18252 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18253 | } else { |
18254 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18255 | at::AutoDispatchSkipFunctionalize guard; |
18256 | at::Tensor tmp_output = at::_ops::diagonal_copy_out::call(self_, offset, dim1, dim2, out_); |
      return out;
18258 | } |
18259 | } else { |
18260 | at::Tensor tmp_output; |
18261 | { |
18262 | at::AutoDispatchSkipFunctionalize guard; |
18263 | tmp_output = at::_ops::diagonal_copy::call(self_, offset, dim1, dim2); |
18264 | } |
18265 | at::functionalization::impl::replace_(out, tmp_output); |
18266 | at::functionalization::impl::commit_update(out); |
18267 | at::functionalization::impl::sync(out); |
18268 | return out; |
18269 | } |
18270 | } |
18271 | |
18272 | at::Tensor & detach_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
18273 | if (false) { |
18274 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18275 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18277 | auto self_meta = to_meta(self); |
18278 | auto out_meta = to_meta(out); |
18279 | at::AutoDispatchSkipFunctionalize func_guard; |
18280 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18281 | at::_ops::detach_copy_out::call(self_meta, out_meta); |
18282 | } |
18283 | |
18284 | at::Tensor self_; |
18285 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18286 | at::functionalization::impl::sync(self); |
18287 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18288 | } else { |
18289 | self_ = self; |
18290 | } |
18291 | |
18292 | at::Tensor out_; |
18293 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18294 | at::functionalization::impl::sync(out); |
18295 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18296 | } else { |
18297 | out_ = out; |
18298 | } |
18299 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18300 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18302 | TORCH_INTERNAL_ASSERT(false, |
18303 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18304 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18305 | } else { |
18306 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18307 | at::AutoDispatchSkipFunctionalize guard; |
18308 | at::Tensor tmp_output = at::_ops::detach_copy_out::call(self_, out_); |
      return out;
18310 | } |
18311 | } else { |
18312 | at::Tensor tmp_output; |
18313 | { |
18314 | at::AutoDispatchSkipFunctionalize guard; |
18315 | tmp_output = at::_ops::detach_copy::call(self_); |
18316 | } |
18317 | at::functionalization::impl::replace_(out, tmp_output); |
18318 | at::functionalization::impl::commit_update(out); |
18319 | at::functionalization::impl::sync(out); |
18320 | return out; |
18321 | } |
18322 | } |
18323 | |
18324 | at::Tensor & slice_copy_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step, at::Tensor & out) { |
18325 | if (false) { |
18326 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18327 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18329 | auto self_meta = to_meta(self); |
18330 | auto out_meta = to_meta(out); |
18331 | at::AutoDispatchSkipFunctionalize func_guard; |
18332 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18333 | at::_ops::slice_copy_Tensor_out::call(self_meta, dim, start, end, step, out_meta); |
18334 | } |
18335 | |
18336 | at::Tensor self_; |
18337 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18338 | at::functionalization::impl::sync(self); |
18339 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18340 | } else { |
18341 | self_ = self; |
18342 | } |
18343 | |
18344 | at::Tensor out_; |
18345 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18346 | at::functionalization::impl::sync(out); |
18347 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18348 | } else { |
18349 | out_ = out; |
18350 | } |
18351 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18352 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18354 | TORCH_INTERNAL_ASSERT(false, |
18355 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18356 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18357 | } else { |
18358 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18359 | at::AutoDispatchSkipFunctionalize guard; |
18360 | at::Tensor tmp_output = at::_ops::slice_copy_Tensor_out::call(self_, dim, start, end, step, out_); |
      return out;
18362 | } |
18363 | } else { |
18364 | at::Tensor tmp_output; |
18365 | { |
18366 | at::AutoDispatchSkipFunctionalize guard; |
18367 | tmp_output = at::_ops::slice_copy_Tensor::call(self_, dim, start, end, step); |
18368 | } |
18369 | at::functionalization::impl::replace_(out, tmp_output); |
18370 | at::functionalization::impl::commit_update(out); |
18371 | at::functionalization::impl::sync(out); |
18372 | return out; |
18373 | } |
18374 | } |
18375 | |
18376 | at::Tensor & transpose_copy_out_int_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out) { |
18377 | if (false) { |
18378 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18379 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18381 | auto self_meta = to_meta(self); |
18382 | auto out_meta = to_meta(out); |
18383 | at::AutoDispatchSkipFunctionalize func_guard; |
18384 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18385 | at::_ops::transpose_copy_int_out::call(self_meta, dim0, dim1, out_meta); |
18386 | } |
18387 | |
18388 | at::Tensor self_; |
18389 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18390 | at::functionalization::impl::sync(self); |
18391 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18392 | } else { |
18393 | self_ = self; |
18394 | } |
18395 | |
18396 | at::Tensor out_; |
18397 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18398 | at::functionalization::impl::sync(out); |
18399 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18400 | } else { |
18401 | out_ = out; |
18402 | } |
18403 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18404 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18406 | TORCH_INTERNAL_ASSERT(false, |
18407 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18408 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18409 | } else { |
18410 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18411 | at::AutoDispatchSkipFunctionalize guard; |
18412 | at::Tensor tmp_output = at::_ops::transpose_copy_int_out::call(self_, dim0, dim1, out_); |
      return out;
18414 | } |
18415 | } else { |
18416 | at::Tensor tmp_output; |
18417 | { |
18418 | at::AutoDispatchSkipFunctionalize guard; |
18419 | tmp_output = at::_ops::transpose_copy_int::call(self_, dim0, dim1); |
18420 | } |
18421 | at::functionalization::impl::replace_(out, tmp_output); |
18422 | at::functionalization::impl::commit_update(out); |
18423 | at::functionalization::impl::sync(out); |
18424 | return out; |
18425 | } |
18426 | } |
18427 | |
18428 | at::Tensor & indices_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
18429 | if (false) { |
18430 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18431 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18433 | auto self_meta = to_meta(self); |
18434 | auto out_meta = to_meta(out); |
18435 | at::AutoDispatchSkipFunctionalize func_guard; |
18436 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18437 | at::_ops::indices_copy_out::call(self_meta, out_meta); |
18438 | } |
18439 | |
18440 | at::Tensor self_; |
18441 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18442 | at::functionalization::impl::sync(self); |
18443 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18444 | } else { |
18445 | self_ = self; |
18446 | } |
18447 | |
18448 | at::Tensor out_; |
18449 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18450 | at::functionalization::impl::sync(out); |
18451 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18452 | } else { |
18453 | out_ = out; |
18454 | } |
18455 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18456 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18458 | TORCH_INTERNAL_ASSERT(false, |
18459 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18460 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18461 | } else { |
18462 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18463 | at::AutoDispatchSkipFunctionalize guard; |
18464 | at::Tensor tmp_output = at::_ops::indices_copy_out::call(self_, out_); |
      return out;
18466 | } |
18467 | } else { |
18468 | at::Tensor tmp_output; |
18469 | { |
18470 | at::AutoDispatchSkipFunctionalize guard; |
18471 | tmp_output = at::_ops::indices_copy::call(self_); |
18472 | } |
18473 | at::functionalization::impl::replace_(out, tmp_output); |
18474 | at::functionalization::impl::commit_update(out); |
18475 | at::functionalization::impl::sync(out); |
18476 | return out; |
18477 | } |
18478 | } |
18479 | |
18480 | at::Tensor & row_indices_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
18481 | if (false) { |
18482 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18483 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18485 | auto self_meta = to_meta(self); |
18486 | auto out_meta = to_meta(out); |
18487 | at::AutoDispatchSkipFunctionalize func_guard; |
18488 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18489 | at::_ops::row_indices_copy_out::call(self_meta, out_meta); |
18490 | } |
18491 | |
18492 | at::Tensor self_; |
18493 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18494 | at::functionalization::impl::sync(self); |
18495 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18496 | } else { |
18497 | self_ = self; |
18498 | } |
18499 | |
18500 | at::Tensor out_; |
18501 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18502 | at::functionalization::impl::sync(out); |
18503 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18504 | } else { |
18505 | out_ = out; |
18506 | } |
18507 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18508 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18510 | TORCH_INTERNAL_ASSERT(false, |
18511 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18512 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18513 | } else { |
18514 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18515 | at::AutoDispatchSkipFunctionalize guard; |
18516 | at::Tensor tmp_output = at::_ops::row_indices_copy_out::call(self_, out_); |
      return out;
18518 | } |
18519 | } else { |
18520 | at::Tensor tmp_output; |
18521 | { |
18522 | at::AutoDispatchSkipFunctionalize guard; |
18523 | tmp_output = at::_ops::row_indices_copy::call(self_); |
18524 | } |
18525 | at::functionalization::impl::replace_(out, tmp_output); |
18526 | at::functionalization::impl::commit_update(out); |
18527 | at::functionalization::impl::sync(out); |
18528 | return out; |
18529 | } |
18530 | } |
18531 | |
18532 | at::Tensor & _triton_multi_head_attention_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, at::Tensor & out) { |
18533 | if (false) { |
18534 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18535 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18537 | auto query_meta = to_meta(query); |
18538 | auto key_meta = to_meta(key); |
18539 | auto value_meta = to_meta(value); |
18540 | auto qkv_weight_meta = to_meta(qkv_weight); |
18541 | auto qkv_bias_meta = to_meta(qkv_bias); |
18542 | auto proj_weight_meta = to_meta(proj_weight); |
18543 | auto proj_bias_meta = to_meta(proj_bias); |
18544 | auto mask_meta = to_meta(mask); |
18545 | auto out_meta = to_meta(out); |
18546 | at::AutoDispatchSkipFunctionalize func_guard; |
18547 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18548 | at::_ops::_triton_multi_head_attention_out::call(query_meta, key_meta, value_meta, embed_dim, num_head, qkv_weight_meta, qkv_bias_meta, proj_weight_meta, proj_bias_meta, mask_meta, out_meta); |
18549 | } |
18550 | |
18551 | at::Tensor query_; |
18552 | if (at::functionalization::impl::isFunctionalTensor(query)) { |
18553 | at::functionalization::impl::sync(query); |
18554 | query_ = at::functionalization::impl::from_functional_tensor(query); |
18555 | } else { |
18556 | query_ = query; |
18557 | } |
18558 | |
18559 | at::Tensor key_; |
18560 | if (at::functionalization::impl::isFunctionalTensor(key)) { |
18561 | at::functionalization::impl::sync(key); |
18562 | key_ = at::functionalization::impl::from_functional_tensor(key); |
18563 | } else { |
18564 | key_ = key; |
18565 | } |
18566 | |
18567 | at::Tensor value_; |
18568 | if (at::functionalization::impl::isFunctionalTensor(value)) { |
18569 | at::functionalization::impl::sync(value); |
18570 | value_ = at::functionalization::impl::from_functional_tensor(value); |
18571 | } else { |
18572 | value_ = value; |
18573 | } |
18574 | |
18575 | at::Tensor qkv_weight_; |
18576 | if (at::functionalization::impl::isFunctionalTensor(qkv_weight)) { |
18577 | at::functionalization::impl::sync(qkv_weight); |
18578 | qkv_weight_ = at::functionalization::impl::from_functional_tensor(qkv_weight); |
18579 | } else { |
18580 | qkv_weight_ = qkv_weight; |
18581 | } |
18582 | |
18583 | at::Tensor qkv_bias_; |
18584 | if (at::functionalization::impl::isFunctionalTensor(qkv_bias)) { |
18585 | at::functionalization::impl::sync(qkv_bias); |
18586 | qkv_bias_ = at::functionalization::impl::from_functional_tensor(qkv_bias); |
18587 | } else { |
18588 | qkv_bias_ = qkv_bias; |
18589 | } |
18590 | |
18591 | at::Tensor proj_weight_; |
18592 | if (at::functionalization::impl::isFunctionalTensor(proj_weight)) { |
18593 | at::functionalization::impl::sync(proj_weight); |
18594 | proj_weight_ = at::functionalization::impl::from_functional_tensor(proj_weight); |
18595 | } else { |
18596 | proj_weight_ = proj_weight; |
18597 | } |
18598 | |
18599 | at::Tensor proj_bias_; |
18600 | if (at::functionalization::impl::isFunctionalTensor(proj_bias)) { |
18601 | at::functionalization::impl::sync(proj_bias); |
18602 | proj_bias_ = at::functionalization::impl::from_functional_tensor(proj_bias); |
18603 | } else { |
18604 | proj_bias_ = proj_bias; |
18605 | } |
18606 | |
18607 | c10::optional<at::Tensor> mask_; |
18608 | if (at::functionalization::impl::isFunctionalTensor(mask)) { |
18609 | at::functionalization::impl::sync(mask); |
18610 | mask_ = at::functionalization::impl::from_functional_tensor(mask); |
18611 | } else { |
18612 | mask_ = mask; |
18613 | } |
18614 | |
18615 | at::Tensor out_; |
18616 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18617 | at::functionalization::impl::sync(out); |
18618 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18619 | } else { |
18620 | out_ = out; |
18621 | } |
18622 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18623 | if ((false || at::functionalization::impl::isFunctionalTensor(query) || at::functionalization::impl::isFunctionalTensor(key) || at::functionalization::impl::isFunctionalTensor(value) || at::functionalization::impl::isFunctionalTensor(qkv_weight) || at::functionalization::impl::isFunctionalTensor(qkv_bias) || at::functionalization::impl::isFunctionalTensor(proj_weight) || at::functionalization::impl::isFunctionalTensor(proj_bias) || at::functionalization::impl::isFunctionalTensor(mask))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18625 | TORCH_INTERNAL_ASSERT(false, |
18626 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18627 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18628 | } else { |
18629 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18630 | at::AutoDispatchSkipFunctionalize guard; |
18631 | at::Tensor tmp_output = at::_ops::_triton_multi_head_attention_out::call(query_, key_, value_, embed_dim, num_head, qkv_weight_, qkv_bias_, proj_weight_, proj_bias_, mask_, out_); |
      return out;
18633 | } |
18634 | } else { |
18635 | at::Tensor tmp_output; |
18636 | { |
18637 | at::AutoDispatchSkipFunctionalize guard; |
18638 | tmp_output = at::_ops::_triton_multi_head_attention::call(query_, key_, value_, embed_dim, num_head, qkv_weight_, qkv_bias_, proj_weight_, proj_bias_, mask_); |
18639 | } |
18640 | at::functionalization::impl::replace_(out, tmp_output); |
18641 | at::functionalization::impl::commit_update(out); |
18642 | at::functionalization::impl::sync(out); |
18643 | return out; |
18644 | } |
18645 | } |
18646 | |
18647 | at::Tensor & special_bessel_j1_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
18648 | if (false) { |
18649 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18650 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18652 | auto self_meta = to_meta(self); |
18653 | auto out_meta = to_meta(out); |
18654 | at::AutoDispatchSkipFunctionalize func_guard; |
18655 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18656 | at::_ops::special_bessel_j1_out::call(self_meta, out_meta); |
18657 | } |
18658 | |
18659 | at::Tensor self_; |
18660 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18661 | at::functionalization::impl::sync(self); |
18662 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18663 | } else { |
18664 | self_ = self; |
18665 | } |
18666 | |
18667 | at::Tensor out_; |
18668 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18669 | at::functionalization::impl::sync(out); |
18670 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18671 | } else { |
18672 | out_ = out; |
18673 | } |
18674 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18675 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18677 | TORCH_INTERNAL_ASSERT(false, |
18678 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18679 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18680 | } else { |
18681 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18682 | at::AutoDispatchSkipFunctionalize guard; |
18683 | at::Tensor tmp_output = at::_ops::special_bessel_j1_out::call(self_, out_); |
      return out;
18685 | } |
18686 | } else { |
18687 | at::Tensor tmp_output; |
18688 | { |
18689 | at::AutoDispatchSkipFunctionalize guard; |
18690 | tmp_output = at::_ops::special_bessel_j1::call(self_); |
18691 | } |
18692 | at::functionalization::impl::replace_(out, tmp_output); |
18693 | at::functionalization::impl::commit_update(out); |
18694 | at::functionalization::impl::sync(out); |
18695 | return out; |
18696 | } |
18697 | } |
18698 | |
18699 | at::Tensor & special_bessel_y1_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
18700 | if (false) { |
18701 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18702 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18704 | auto self_meta = to_meta(self); |
18705 | auto out_meta = to_meta(out); |
18706 | at::AutoDispatchSkipFunctionalize func_guard; |
18707 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18708 | at::_ops::special_bessel_y1_out::call(self_meta, out_meta); |
18709 | } |
18710 | |
18711 | at::Tensor self_; |
18712 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18713 | at::functionalization::impl::sync(self); |
18714 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18715 | } else { |
18716 | self_ = self; |
18717 | } |
18718 | |
18719 | at::Tensor out_; |
18720 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18721 | at::functionalization::impl::sync(out); |
18722 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18723 | } else { |
18724 | out_ = out; |
18725 | } |
18726 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18727 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18729 | TORCH_INTERNAL_ASSERT(false, |
18730 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18731 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18732 | } else { |
18733 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18734 | at::AutoDispatchSkipFunctionalize guard; |
18735 | at::Tensor tmp_output = at::_ops::special_bessel_y1_out::call(self_, out_); |
      return out;
18737 | } |
18738 | } else { |
18739 | at::Tensor tmp_output; |
18740 | { |
18741 | at::AutoDispatchSkipFunctionalize guard; |
18742 | tmp_output = at::_ops::special_bessel_y1::call(self_); |
18743 | } |
18744 | at::functionalization::impl::replace_(out, tmp_output); |
18745 | at::functionalization::impl::commit_update(out); |
18746 | at::functionalization::impl::sync(out); |
18747 | return out; |
18748 | } |
18749 | } |
18750 | |
18751 | at::Tensor & special_legendre_polynomial_p_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) { |
18752 | if (false) { |
18753 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18754 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18756 | auto x_meta = to_meta(x); |
18757 | auto n_meta = to_meta(n); |
18758 | auto out_meta = to_meta(out); |
18759 | at::AutoDispatchSkipFunctionalize func_guard; |
18760 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18761 | at::_ops::special_legendre_polynomial_p_out::call(x_meta, n_meta, out_meta); |
18762 | } |
18763 | |
18764 | at::Tensor x_; |
18765 | if (at::functionalization::impl::isFunctionalTensor(x)) { |
18766 | at::functionalization::impl::sync(x); |
18767 | x_ = at::functionalization::impl::from_functional_tensor(x); |
18768 | } else { |
18769 | x_ = x; |
18770 | } |
18771 | |
18772 | at::Tensor n_; |
18773 | if (at::functionalization::impl::isFunctionalTensor(n)) { |
18774 | at::functionalization::impl::sync(n); |
18775 | n_ = at::functionalization::impl::from_functional_tensor(n); |
18776 | } else { |
18777 | n_ = n; |
18778 | } |
18779 | |
18780 | at::Tensor out_; |
18781 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18782 | at::functionalization::impl::sync(out); |
18783 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18784 | } else { |
18785 | out_ = out; |
18786 | } |
18787 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18788 | if ((false || at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(n))) { |
18789 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
18790 | TORCH_INTERNAL_ASSERT(false, |
18791 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18792 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18793 | } else { |
18794 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18795 | at::AutoDispatchSkipFunctionalize guard; |
18796 | at::Tensor tmp_output = at::_ops::special_legendre_polynomial_p_out::call(x_, n_, out_); |
18797 | return out; |
18798 | } |
18799 | } else { |
18800 | at::Tensor tmp_output; |
18801 | { |
18802 | at::AutoDispatchSkipFunctionalize guard; |
18803 | tmp_output = at::_ops::special_legendre_polynomial_p::call(x_, n_); |
18804 | } |
18805 | at::functionalization::impl::replace_(out, tmp_output); |
18806 | at::functionalization::impl::commit_update(out); |
18807 | at::functionalization::impl::sync(out); |
18808 | return out; |
18809 | } |
18810 | } |
18811 | |
18812 | at::Tensor & special_legendre_polynomial_p_out_x_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { |
18813 | if (false) { |
18814 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18815 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
18816 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
18817 | auto n_meta = to_meta(n); |
18818 | auto out_meta = to_meta(out); |
18819 | at::AutoDispatchSkipFunctionalize func_guard; |
18820 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18821 | at::_ops::special_legendre_polynomial_p_x_scalar_out::call(x, n_meta, out_meta); |
18822 | } |
18823 | |
18824 | at::Tensor n_; |
18825 | if (at::functionalization::impl::isFunctionalTensor(n)) { |
18826 | at::functionalization::impl::sync(n); |
18827 | n_ = at::functionalization::impl::from_functional_tensor(n); |
18828 | } else { |
18829 | n_ = n; |
18830 | } |
18831 | |
18832 | at::Tensor out_; |
18833 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18834 | at::functionalization::impl::sync(out); |
18835 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18836 | } else { |
18837 | out_ = out; |
18838 | } |
18839 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18840 | if ((false || at::functionalization::impl::isFunctionalTensor(n))) { |
18841 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
18842 | TORCH_INTERNAL_ASSERT(false, |
18843 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18844 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18845 | } else { |
18846 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18847 | at::AutoDispatchSkipFunctionalize guard; |
18848 | at::Tensor tmp_output = at::_ops::special_legendre_polynomial_p_x_scalar_out::call(x, n_, out_); |
18849 | return out; |
18850 | } |
18851 | } else { |
18852 | at::Tensor tmp_output; |
18853 | { |
18854 | at::AutoDispatchSkipFunctionalize guard; |
18855 | tmp_output = at::_ops::special_legendre_polynomial_p_x_scalar::call(x, n_); |
18856 | } |
18857 | at::functionalization::impl::replace_(out, tmp_output); |
18858 | at::functionalization::impl::commit_update(out); |
18859 | at::functionalization::impl::sync(out); |
18860 | return out; |
18861 | } |
18862 | } |
18863 | |
18864 | at::Tensor & special_legendre_polynomial_p_out_n_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { |
18865 | if (false) { |
18866 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18867 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
18868 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
18869 | auto x_meta = to_meta(x); |
18870 | auto out_meta = to_meta(out); |
18871 | at::AutoDispatchSkipFunctionalize func_guard; |
18872 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18873 | at::_ops::special_legendre_polynomial_p_n_scalar_out::call(x_meta, n, out_meta); |
18874 | } |
18875 | |
18876 | at::Tensor x_; |
18877 | if (at::functionalization::impl::isFunctionalTensor(x)) { |
18878 | at::functionalization::impl::sync(x); |
18879 | x_ = at::functionalization::impl::from_functional_tensor(x); |
18880 | } else { |
18881 | x_ = x; |
18882 | } |
18883 | |
18884 | at::Tensor out_; |
18885 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18886 | at::functionalization::impl::sync(out); |
18887 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18888 | } else { |
18889 | out_ = out; |
18890 | } |
18891 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18892 | if ((false || at::functionalization::impl::isFunctionalTensor(x))) { |
18893 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
18894 | TORCH_INTERNAL_ASSERT(false, |
18895 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18896 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18897 | } else { |
18898 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18899 | at::AutoDispatchSkipFunctionalize guard; |
18900 | at::Tensor tmp_output = at::_ops::special_legendre_polynomial_p_n_scalar_out::call(x_, n, out_); |
18901 | return out; |
18902 | } |
18903 | } else { |
18904 | at::Tensor tmp_output; |
18905 | { |
18906 | at::AutoDispatchSkipFunctionalize guard; |
18907 | tmp_output = at::_ops::special_legendre_polynomial_p_n_scalar::call(x_, n); |
18908 | } |
18909 | at::functionalization::impl::replace_(out, tmp_output); |
18910 | at::functionalization::impl::commit_update(out); |
18911 | at::functionalization::impl::sync(out); |
18912 | return out; |
18913 | } |
18914 | } |
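
// Note on the x_scalar / n_scalar overloads: c10::Scalar arguments carry no storage, so they are
// forwarded as-is; only the Tensor arguments (and `out`) go through the
// sync/from_functional_tensor unwrapping shown above.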
18915 | |
18916 | at::Tensor & special_modified_bessel_i0_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
18917 | if (false) { |
18918 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18919 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
18920 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
18921 | auto self_meta = to_meta(self); |
18922 | auto out_meta = to_meta(out); |
18923 | at::AutoDispatchSkipFunctionalize func_guard; |
18924 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18925 | at::_ops::special_modified_bessel_i0_out::call(self_meta, out_meta); |
18926 | } |
18927 | |
18928 | at::Tensor self_; |
18929 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18930 | at::functionalization::impl::sync(self); |
18931 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18932 | } else { |
18933 | self_ = self; |
18934 | } |
18935 | |
18936 | at::Tensor out_; |
18937 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18938 | at::functionalization::impl::sync(out); |
18939 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18940 | } else { |
18941 | out_ = out; |
18942 | } |
18943 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18944 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
18945 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
18946 | TORCH_INTERNAL_ASSERT(false, |
18947 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18948 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18949 | } else { |
18950 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18951 | at::AutoDispatchSkipFunctionalize guard; |
18952 | at::Tensor tmp_output = at::_ops::special_modified_bessel_i0_out::call(self_, out_); |
18953 | return out; |
18954 | } |
18955 | } else { |
18956 | at::Tensor tmp_output; |
18957 | { |
18958 | at::AutoDispatchSkipFunctionalize guard; |
18959 | tmp_output = at::_ops::special_modified_bessel_i0::call(self_); |
18960 | } |
18961 | at::functionalization::impl::replace_(out, tmp_output); |
18962 | at::functionalization::impl::commit_update(out); |
18963 | at::functionalization::impl::sync(out); |
18964 | return out; |
18965 | } |
18966 | } |
18967 | |
18968 | at::Tensor & special_shifted_chebyshev_polynomial_t_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) { |
18969 | if (false) { |
18970 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18971 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
18972 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
18973 | auto x_meta = to_meta(x); |
18974 | auto n_meta = to_meta(n); |
18975 | auto out_meta = to_meta(out); |
18976 | at::AutoDispatchSkipFunctionalize func_guard; |
18977 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18978 | at::_ops::special_shifted_chebyshev_polynomial_t_out::call(x_meta, n_meta, out_meta); |
18979 | } |
18980 | |
18981 | at::Tensor x_; |
18982 | if (at::functionalization::impl::isFunctionalTensor(x)) { |
18983 | at::functionalization::impl::sync(x); |
18984 | x_ = at::functionalization::impl::from_functional_tensor(x); |
18985 | } else { |
18986 | x_ = x; |
18987 | } |
18988 | |
18989 | at::Tensor n_; |
18990 | if (at::functionalization::impl::isFunctionalTensor(n)) { |
18991 | at::functionalization::impl::sync(n); |
18992 | n_ = at::functionalization::impl::from_functional_tensor(n); |
18993 | } else { |
18994 | n_ = n; |
18995 | } |
18996 | |
18997 | at::Tensor out_; |
18998 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18999 | at::functionalization::impl::sync(out); |
19000 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19001 | } else { |
19002 | out_ = out; |
19003 | } |
19004 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19005 | if ((false || at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(n))) { |
19006 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
19007 | TORCH_INTERNAL_ASSERT(false, |
19008 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19009 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19010 | } else { |
19011 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19012 | at::AutoDispatchSkipFunctionalize guard; |
19013 | at::Tensor tmp_output = at::_ops::special_shifted_chebyshev_polynomial_t_out::call(x_, n_, out_); |
19014 | return out; |
19015 | } |
19016 | } else { |
19017 | at::Tensor tmp_output; |
19018 | { |
19019 | at::AutoDispatchSkipFunctionalize guard; |
19020 | tmp_output = at::_ops::special_shifted_chebyshev_polynomial_t::call(x_, n_); |
19021 | } |
19022 | at::functionalization::impl::replace_(out, tmp_output); |
19023 | at::functionalization::impl::commit_update(out); |
19024 | at::functionalization::impl::sync(out); |
19025 | return out; |
19026 | } |
19027 | } |
19028 | |
19029 | at::Tensor & special_shifted_chebyshev_polynomial_t_out_x_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { |
19030 | if (false) { |
19031 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19032 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
19033 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
19034 | auto n_meta = to_meta(n); |
19035 | auto out_meta = to_meta(out); |
19036 | at::AutoDispatchSkipFunctionalize func_guard; |
19037 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19038 | at::_ops::special_shifted_chebyshev_polynomial_t_x_scalar_out::call(x, n_meta, out_meta); |
19039 | } |
19040 | |
19041 | at::Tensor n_; |
19042 | if (at::functionalization::impl::isFunctionalTensor(n)) { |
19043 | at::functionalization::impl::sync(n); |
19044 | n_ = at::functionalization::impl::from_functional_tensor(n); |
19045 | } else { |
19046 | n_ = n; |
19047 | } |
19048 | |
19049 | at::Tensor out_; |
19050 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19051 | at::functionalization::impl::sync(out); |
19052 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19053 | } else { |
19054 | out_ = out; |
19055 | } |
19056 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19057 | if ((false || at::functionalization::impl::isFunctionalTensor(n))) { |
19058 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
19059 | TORCH_INTERNAL_ASSERT(false, |
19060 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19061 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19062 | } else { |
19063 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19064 | at::AutoDispatchSkipFunctionalize guard; |
19065 | at::Tensor tmp_output = at::_ops::special_shifted_chebyshev_polynomial_t_x_scalar_out::call(x, n_, out_); |
19066 | return out; |
19067 | } |
19068 | } else { |
19069 | at::Tensor tmp_output; |
19070 | { |
19071 | at::AutoDispatchSkipFunctionalize guard; |
19072 | tmp_output = at::_ops::special_shifted_chebyshev_polynomial_t_x_scalar::call(x, n_); |
19073 | } |
19074 | at::functionalization::impl::replace_(out, tmp_output); |
19075 | at::functionalization::impl::commit_update(out); |
19076 | at::functionalization::impl::sync(out); |
19077 | return out; |
19078 | } |
19079 | } |
19080 | |
19081 | at::Tensor & special_shifted_chebyshev_polynomial_t_out_n_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { |
19082 | if (false) { |
19083 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19084 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
19085 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
19086 | auto x_meta = to_meta(x); |
19087 | auto out_meta = to_meta(out); |
19088 | at::AutoDispatchSkipFunctionalize func_guard; |
19089 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19090 | at::_ops::special_shifted_chebyshev_polynomial_t_n_scalar_out::call(x_meta, n, out_meta); |
19091 | } |
19092 | |
19093 | at::Tensor x_; |
19094 | if (at::functionalization::impl::isFunctionalTensor(x)) { |
19095 | at::functionalization::impl::sync(x); |
19096 | x_ = at::functionalization::impl::from_functional_tensor(x); |
19097 | } else { |
19098 | x_ = x; |
19099 | } |
19100 | |
19101 | at::Tensor out_; |
19102 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19103 | at::functionalization::impl::sync(out); |
19104 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19105 | } else { |
19106 | out_ = out; |
19107 | } |
19108 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19109 | if ((false || at::functionalization::impl::isFunctionalTensor(x))) { |
19110 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
19111 | TORCH_INTERNAL_ASSERT(false, |
19112 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19113 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19114 | } else { |
19115 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19116 | at::AutoDispatchSkipFunctionalize guard; |
19117 | at::Tensor tmp_output = at::_ops::special_shifted_chebyshev_polynomial_t_n_scalar_out::call(x_, n, out_); |
19118 | return out; |
19119 | } |
19120 | } else { |
19121 | at::Tensor tmp_output; |
19122 | { |
19123 | at::AutoDispatchSkipFunctionalize guard; |
19124 | tmp_output = at::_ops::special_shifted_chebyshev_polynomial_t_n_scalar::call(x_, n); |
19125 | } |
19126 | at::functionalization::impl::replace_(out, tmp_output); |
19127 | at::functionalization::impl::commit_update(out); |
19128 | at::functionalization::impl::sync(out); |
19129 | return out; |
19130 | } |
19131 | } |
19132 | |
19133 | at::Tensor & special_shifted_chebyshev_polynomial_u_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) { |
19134 | if (false) { |
19135 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19136 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
19137 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
19138 | auto x_meta = to_meta(x); |
19139 | auto n_meta = to_meta(n); |
19140 | auto out_meta = to_meta(out); |
19141 | at::AutoDispatchSkipFunctionalize func_guard; |
19142 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19143 | at::_ops::special_shifted_chebyshev_polynomial_u_out::call(x_meta, n_meta, out_meta); |
19144 | } |
19145 | |
19146 | at::Tensor x_; |
19147 | if (at::functionalization::impl::isFunctionalTensor(x)) { |
19148 | at::functionalization::impl::sync(x); |
19149 | x_ = at::functionalization::impl::from_functional_tensor(x); |
19150 | } else { |
19151 | x_ = x; |
19152 | } |
19153 | |
19154 | at::Tensor n_; |
19155 | if (at::functionalization::impl::isFunctionalTensor(n)) { |
19156 | at::functionalization::impl::sync(n); |
19157 | n_ = at::functionalization::impl::from_functional_tensor(n); |
19158 | } else { |
19159 | n_ = n; |
19160 | } |
19161 | |
19162 | at::Tensor out_; |
19163 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19164 | at::functionalization::impl::sync(out); |
19165 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19166 | } else { |
19167 | out_ = out; |
19168 | } |
19169 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19170 | if ((false || at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(n))) { |
19171 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
19172 | TORCH_INTERNAL_ASSERT(false, |
19173 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19174 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19175 | } else { |
19176 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19177 | at::AutoDispatchSkipFunctionalize guard; |
19178 | at::Tensor tmp_output = at::_ops::special_shifted_chebyshev_polynomial_u_out::call(x_, n_, out_); |
19179 | return out; |
19180 | } |
19181 | } else { |
19182 | at::Tensor tmp_output; |
19183 | { |
19184 | at::AutoDispatchSkipFunctionalize guard; |
19185 | tmp_output = at::_ops::special_shifted_chebyshev_polynomial_u::call(x_, n_); |
19186 | } |
19187 | at::functionalization::impl::replace_(out, tmp_output); |
19188 | at::functionalization::impl::commit_update(out); |
19189 | at::functionalization::impl::sync(out); |
19190 | return out; |
19191 | } |
19192 | } |
19193 | |
19194 | at::Tensor & special_shifted_chebyshev_polynomial_u_out_x_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { |
19195 | if (false) { |
19196 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19197 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
19198 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
19199 | auto n_meta = to_meta(n); |
19200 | auto out_meta = to_meta(out); |
19201 | at::AutoDispatchSkipFunctionalize func_guard; |
19202 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19203 | at::_ops::special_shifted_chebyshev_polynomial_u_x_scalar_out::call(x, n_meta, out_meta); |
19204 | } |
19205 | |
19206 | at::Tensor n_; |
19207 | if (at::functionalization::impl::isFunctionalTensor(n)) { |
19208 | at::functionalization::impl::sync(n); |
19209 | n_ = at::functionalization::impl::from_functional_tensor(n); |
19210 | } else { |
19211 | n_ = n; |
19212 | } |
19213 | |
19214 | at::Tensor out_; |
19215 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19216 | at::functionalization::impl::sync(out); |
19217 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19218 | } else { |
19219 | out_ = out; |
19220 | } |
19221 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19222 | if ((false || at::functionalization::impl::isFunctionalTensor(n))) { |
19223 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
19224 | TORCH_INTERNAL_ASSERT(false, |
19225 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19226 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19227 | } else { |
19228 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19229 | at::AutoDispatchSkipFunctionalize guard; |
19230 | at::Tensor tmp_output = at::_ops::special_shifted_chebyshev_polynomial_u_x_scalar_out::call(x, n_, out_); |
19231 | return out; |
19232 | } |
19233 | } else { |
19234 | at::Tensor tmp_output; |
19235 | { |
19236 | at::AutoDispatchSkipFunctionalize guard; |
19237 | tmp_output = at::_ops::special_shifted_chebyshev_polynomial_u_x_scalar::call(x, n_); |
19238 | } |
19239 | at::functionalization::impl::replace_(out, tmp_output); |
19240 | at::functionalization::impl::commit_update(out); |
19241 | at::functionalization::impl::sync(out); |
19242 | return out; |
19243 | } |
19244 | } |
19245 | |
19246 | at::Tensor & special_shifted_chebyshev_polynomial_u_out_n_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { |
19247 | if (false) { |
19248 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19249 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
19250 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
19251 | auto x_meta = to_meta(x); |
19252 | auto out_meta = to_meta(out); |
19253 | at::AutoDispatchSkipFunctionalize func_guard; |
19254 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19255 | at::_ops::special_shifted_chebyshev_polynomial_u_n_scalar_out::call(x_meta, n, out_meta); |
19256 | } |
19257 | |
19258 | at::Tensor x_; |
19259 | if (at::functionalization::impl::isFunctionalTensor(x)) { |
19260 | at::functionalization::impl::sync(x); |
19261 | x_ = at::functionalization::impl::from_functional_tensor(x); |
19262 | } else { |
19263 | x_ = x; |
19264 | } |
19265 | |
19266 | at::Tensor out_; |
19267 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19268 | at::functionalization::impl::sync(out); |
19269 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19270 | } else { |
19271 | out_ = out; |
19272 | } |
19273 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19274 | if ((false || at::functionalization::impl::isFunctionalTensor(x))) { |
19275 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
19276 | TORCH_INTERNAL_ASSERT(false, |
19277 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19278 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19279 | } else { |
19280 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19281 | at::AutoDispatchSkipFunctionalize guard; |
19282 | at::Tensor tmp_output = at::_ops::special_shifted_chebyshev_polynomial_u_n_scalar_out::call(x_, n, out_); |
19283 | return out; |
19284 | } |
19285 | } else { |
19286 | at::Tensor tmp_output; |
19287 | { |
19288 | at::AutoDispatchSkipFunctionalize guard; |
19289 | tmp_output = at::_ops::special_shifted_chebyshev_polynomial_u_n_scalar::call(x_, n); |
19290 | } |
19291 | at::functionalization::impl::replace_(out, tmp_output); |
19292 | at::functionalization::impl::commit_update(out); |
19293 | at::functionalization::impl::sync(out); |
19294 | return out; |
19295 | } |
19296 | } |
19297 | |
19298 | at::Tensor & special_shifted_chebyshev_polynomial_w_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) { |
19299 | if (false) { |
19300 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19301 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
19302 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
19303 | auto x_meta = to_meta(x); |
19304 | auto n_meta = to_meta(n); |
19305 | auto out_meta = to_meta(out); |
19306 | at::AutoDispatchSkipFunctionalize func_guard; |
19307 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19308 | at::_ops::special_shifted_chebyshev_polynomial_w_out::call(x_meta, n_meta, out_meta); |
19309 | } |
19310 | |
19311 | at::Tensor x_; |
19312 | if (at::functionalization::impl::isFunctionalTensor(x)) { |
19313 | at::functionalization::impl::sync(x); |
19314 | x_ = at::functionalization::impl::from_functional_tensor(x); |
19315 | } else { |
19316 | x_ = x; |
19317 | } |
19318 | |
19319 | at::Tensor n_; |
19320 | if (at::functionalization::impl::isFunctionalTensor(n)) { |
19321 | at::functionalization::impl::sync(n); |
19322 | n_ = at::functionalization::impl::from_functional_tensor(n); |
19323 | } else { |
19324 | n_ = n; |
19325 | } |
19326 | |
19327 | at::Tensor out_; |
19328 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19329 | at::functionalization::impl::sync(out); |
19330 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19331 | } else { |
19332 | out_ = out; |
19333 | } |
19334 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19335 | if ((false || at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(n))) { |
19336 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
19337 | TORCH_INTERNAL_ASSERT(false, |
19338 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19339 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19340 | } else { |
19341 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19342 | at::AutoDispatchSkipFunctionalize guard; |
19343 | at::Tensor tmp_output = at::_ops::special_shifted_chebyshev_polynomial_w_out::call(x_, n_, out_); |
19344 | return out; |
19345 | } |
19346 | } else { |
19347 | at::Tensor tmp_output; |
19348 | { |
19349 | at::AutoDispatchSkipFunctionalize guard; |
19350 | tmp_output = at::_ops::special_shifted_chebyshev_polynomial_w::call(x_, n_); |
19351 | } |
19352 | at::functionalization::impl::replace_(out, tmp_output); |
19353 | at::functionalization::impl::commit_update(out); |
19354 | at::functionalization::impl::sync(out); |
19355 | return out; |
19356 | } |
19357 | } |
19358 | |
19359 | at::Tensor & special_shifted_chebyshev_polynomial_w_out_x_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { |
19360 | if (false) { |
19361 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19362 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
19363 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
19364 | auto n_meta = to_meta(n); |
19365 | auto out_meta = to_meta(out); |
19366 | at::AutoDispatchSkipFunctionalize func_guard; |
19367 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19368 | at::_ops::special_shifted_chebyshev_polynomial_w_x_scalar_out::call(x, n_meta, out_meta); |
19369 | } |
19370 | |
19371 | at::Tensor n_; |
19372 | if (at::functionalization::impl::isFunctionalTensor(n)) { |
19373 | at::functionalization::impl::sync(n); |
19374 | n_ = at::functionalization::impl::from_functional_tensor(n); |
19375 | } else { |
19376 | n_ = n; |
19377 | } |
19378 | |
19379 | at::Tensor out_; |
19380 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19381 | at::functionalization::impl::sync(out); |
19382 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19383 | } else { |
19384 | out_ = out; |
19385 | } |
19386 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19387 | if ((false || at::functionalization::impl::isFunctionalTensor(n))) { |
19388 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
19389 | TORCH_INTERNAL_ASSERT(false, |
19390 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19391 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19392 | } else { |
19393 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19394 | at::AutoDispatchSkipFunctionalize guard; |
19395 | at::Tensor tmp_output = at::_ops::special_shifted_chebyshev_polynomial_w_x_scalar_out::call(x, n_, out_); |
19396 | return out; |
19397 | } |
19398 | } else { |
19399 | at::Tensor tmp_output; |
19400 | { |
19401 | at::AutoDispatchSkipFunctionalize guard; |
19402 | tmp_output = at::_ops::special_shifted_chebyshev_polynomial_w_x_scalar::call(x, n_); |
19403 | } |
19404 | at::functionalization::impl::replace_(out, tmp_output); |
19405 | at::functionalization::impl::commit_update(out); |
19406 | at::functionalization::impl::sync(out); |
19407 | return out; |
19408 | } |
19409 | } |
19410 | |
19411 | at::Tensor & special_shifted_chebyshev_polynomial_w_out_n_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { |
19412 | if (false) { |
19413 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19414 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
19415 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
19416 | auto x_meta = to_meta(x); |
19417 | auto out_meta = to_meta(out); |
19418 | at::AutoDispatchSkipFunctionalize func_guard; |
19419 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19420 | at::_ops::special_shifted_chebyshev_polynomial_w_n_scalar_out::call(x_meta, n, out_meta); |
19421 | } |
19422 | |
19423 | at::Tensor x_; |
19424 | if (at::functionalization::impl::isFunctionalTensor(x)) { |
19425 | at::functionalization::impl::sync(x); |
19426 | x_ = at::functionalization::impl::from_functional_tensor(x); |
19427 | } else { |
19428 | x_ = x; |
19429 | } |
19430 | |
19431 | at::Tensor out_; |
19432 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19433 | at::functionalization::impl::sync(out); |
19434 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19435 | } else { |
19436 | out_ = out; |
19437 | } |
19438 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19439 | if ((false || at::functionalization::impl::isFunctionalTensor(x))) { |
19440 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
19441 | TORCH_INTERNAL_ASSERT(false, |
19442 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19443 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19444 | } else { |
19445 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19446 | at::AutoDispatchSkipFunctionalize guard; |
19447 | at::Tensor tmp_output = at::_ops::special_shifted_chebyshev_polynomial_w_n_scalar_out::call(x_, n, out_); |
19448 | return out; |
19449 | } |
19450 | } else { |
19451 | at::Tensor tmp_output; |
19452 | { |
19453 | at::AutoDispatchSkipFunctionalize guard; |
19454 | tmp_output = at::_ops::special_shifted_chebyshev_polynomial_w_n_scalar::call(x_, n); |
19455 | } |
19456 | at::functionalization::impl::replace_(out, tmp_output); |
19457 | at::functionalization::impl::commit_update(out); |
19458 | at::functionalization::impl::sync(out); |
19459 | return out; |
19460 | } |
19461 | } |
19462 | |
19463 | void _fused_adam_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out) { |
19464 | if (false) { |
19465 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19466 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
19467 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
19468 | auto self_meta = to_meta(self); |
19469 | auto grads_meta = to_meta(grads); |
19470 | auto exp_avgs_meta = to_meta(exp_avgs); |
19471 | auto exp_avg_sqs_meta = to_meta(exp_avg_sqs); |
19472 | auto max_exp_avg_sqs_meta = to_meta(max_exp_avg_sqs); |
19473 | auto state_steps_meta = to_meta(state_steps); |
19474 | auto grad_scale_meta = to_meta(grad_scale); |
19475 | auto found_inf_meta = to_meta(found_inf); |
19476 | auto out_meta = to_meta(out); |
19477 | at::AutoDispatchSkipFunctionalize func_guard; |
19478 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19479 | at::_ops::_fused_adam_out::call(self_meta, grads_meta, exp_avgs_meta, exp_avg_sqs_meta, max_exp_avg_sqs_meta, state_steps_meta, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_meta, found_inf_meta, out_meta); |
19480 | } |
19481 | |
19482 | ::std::vector<at::Tensor> self_; |
19483 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
19484 | at::functionalization::impl::sync(self); |
19485 | self_ = at::functionalization::impl::from_functional_tensor(self); |
19486 | } else { |
19487 | self_ = self.vec(); |
19488 | } |
19489 | |
19490 | ::std::vector<at::Tensor> grads_; |
19491 | if (at::functionalization::impl::isFunctionalTensor(grads)) { |
19492 | at::functionalization::impl::sync(grads); |
19493 | grads_ = at::functionalization::impl::from_functional_tensor(grads); |
19494 | } else { |
19495 | grads_ = grads.vec(); |
19496 | } |
19497 | |
19498 | ::std::vector<at::Tensor> exp_avgs_; |
19499 | if (at::functionalization::impl::isFunctionalTensor(exp_avgs)) { |
19500 | at::functionalization::impl::sync(exp_avgs); |
19501 | exp_avgs_ = at::functionalization::impl::from_functional_tensor(exp_avgs); |
19502 | } else { |
19503 | exp_avgs_ = exp_avgs.vec(); |
19504 | } |
19505 | |
19506 | ::std::vector<at::Tensor> exp_avg_sqs_; |
19507 | if (at::functionalization::impl::isFunctionalTensor(exp_avg_sqs)) { |
19508 | at::functionalization::impl::sync(exp_avg_sqs); |
19509 | exp_avg_sqs_ = at::functionalization::impl::from_functional_tensor(exp_avg_sqs); |
19510 | } else { |
19511 | exp_avg_sqs_ = exp_avg_sqs.vec(); |
19512 | } |
19513 | |
19514 | ::std::vector<at::Tensor> max_exp_avg_sqs_; |
19515 | if (at::functionalization::impl::isFunctionalTensor(max_exp_avg_sqs)) { |
19516 | at::functionalization::impl::sync(max_exp_avg_sqs); |
19517 | max_exp_avg_sqs_ = at::functionalization::impl::from_functional_tensor(max_exp_avg_sqs); |
19518 | } else { |
19519 | max_exp_avg_sqs_ = max_exp_avg_sqs.vec(); |
19520 | } |
19521 | |
19522 | ::std::vector<at::Tensor> state_steps_; |
19523 | if (at::functionalization::impl::isFunctionalTensor(state_steps)) { |
19524 | at::functionalization::impl::sync(state_steps); |
19525 | state_steps_ = at::functionalization::impl::from_functional_tensor(state_steps); |
19526 | } else { |
19527 | state_steps_ = state_steps.vec(); |
19528 | } |
19529 | |
19530 | c10::optional<at::Tensor> grad_scale_; |
19531 | if (at::functionalization::impl::isFunctionalTensor(grad_scale)) { |
19532 | at::functionalization::impl::sync(grad_scale); |
19533 | grad_scale_ = at::functionalization::impl::from_functional_tensor(grad_scale); |
19534 | } else { |
19535 | grad_scale_ = grad_scale; |
19536 | } |
19537 | |
19538 | c10::optional<at::Tensor> found_inf_; |
19539 | if (at::functionalization::impl::isFunctionalTensor(found_inf)) { |
19540 | at::functionalization::impl::sync(found_inf); |
19541 | found_inf_ = at::functionalization::impl::from_functional_tensor(found_inf); |
19542 | } else { |
19543 | found_inf_ = found_inf; |
19544 | } |
19545 | |
19546 | ::std::vector<at::Tensor> out_; |
19547 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19548 | at::functionalization::impl::sync(out); |
19549 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19550 | } else { |
19551 | out_ = out.vec(); |
19552 | } |
19553 | if (!(true && at::functionalization::impl::isFunctionalTensor(grads) && at::functionalization::impl::isFunctionalTensor(exp_avgs) && at::functionalization::impl::isFunctionalTensor(exp_avg_sqs) && at::functionalization::impl::isFunctionalTensor(max_exp_avg_sqs) && at::functionalization::impl::isFunctionalTensor(out))) { |
19554 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(state_steps) || at::functionalization::impl::isFunctionalTensor(grad_scale) || at::functionalization::impl::isFunctionalTensor(found_inf))) { |
19555 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
19556 | TORCH_INTERNAL_ASSERT(false, |
19557 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19558 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19559 | } else { |
19560 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19561 | at::AutoDispatchSkipFunctionalize guard; |
19562 | at::_ops::_fused_adam_out::call(self_, grads_, exp_avgs_, exp_avg_sqs_, max_exp_avg_sqs_, state_steps_, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_, found_inf_, out_); |
19564 | } |
19565 | } else { |
19566 | ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> tmp_output; |
19567 | { |
19568 | at::AutoDispatchSkipFunctionalize guard; |
19569 | tmp_output = at::_ops::_fused_adam::call(self_, grads_, exp_avgs_, exp_avg_sqs_, max_exp_avg_sqs_, state_steps_, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_, found_inf_); |
19570 | } |
19571 | at::functionalization::impl::replace_(grads, std::get<0>(tmp_output)); |
19572 | at::functionalization::impl::commit_update(grads); |
19573 | at::functionalization::impl::sync(grads); |
19574 | at::functionalization::impl::replace_(exp_avgs, std::get<1>(tmp_output)); |
19575 | at::functionalization::impl::commit_update(exp_avgs); |
19576 | at::functionalization::impl::sync(exp_avgs); |
19577 | at::functionalization::impl::replace_(exp_avg_sqs, std::get<2>(tmp_output)); |
19578 | at::functionalization::impl::commit_update(exp_avg_sqs); |
19579 | at::functionalization::impl::sync(exp_avg_sqs); |
19580 | at::functionalization::impl::replace_(max_exp_avg_sqs, std::get<3>(tmp_output)); |
19581 | at::functionalization::impl::commit_update(max_exp_avg_sqs); |
19582 | at::functionalization::impl::sync(max_exp_avg_sqs); |
19583 | at::functionalization::impl::replace_(out, std::get<4>(tmp_output)); |
19584 | at::functionalization::impl::commit_update(out); |
19585 | at::functionalization::impl::sync(out); |
19586 | |
19587 | } |
19588 | } |
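
// The TensorList kernel above differs from the single-Tensor kernels only in plumbing: each
// mutated at::TensorList is unwrapped into a ::std::vector<at::Tensor>, the functional
// `_fused_adam` returns one vector per mutated list, and each vector is committed back
// element-wise with replace_/commit_update/sync on the original list
// (std::get<0>..std::get<4> of tmp_output above).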
19589 | |
19590 | void _fused_adam_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) { |
19591 | if (true) { |
19592 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19593 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
19594 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
19595 | auto self_meta = to_meta(self); |
19596 | auto grads_meta = to_meta(grads); |
19597 | auto exp_avgs_meta = to_meta(exp_avgs); |
19598 | auto exp_avg_sqs_meta = to_meta(exp_avg_sqs); |
19599 | auto max_exp_avg_sqs_meta = to_meta(max_exp_avg_sqs); |
19600 | auto state_steps_meta = to_meta(state_steps); |
19601 | auto grad_scale_meta = to_meta(grad_scale); |
19602 | auto found_inf_meta = to_meta(found_inf); |
19603 | at::AutoDispatchSkipFunctionalize func_guard; |
19604 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19605 | at::_ops::_fused_adam_::call(self_meta, grads_meta, exp_avgs_meta, exp_avg_sqs_meta, max_exp_avg_sqs_meta, state_steps_meta, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_meta, found_inf_meta); |
19606 | } |
19607 | |
19608 | ::std::vector<at::Tensor> self_; |
19609 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
19610 | at::functionalization::impl::sync(self); |
19611 | self_ = at::functionalization::impl::from_functional_tensor(self); |
19612 | } else { |
19613 | self_ = self.vec(); |
19614 | } |
19615 | |
19616 | ::std::vector<at::Tensor> grads_; |
19617 | if (at::functionalization::impl::isFunctionalTensor(grads)) { |
19618 | at::functionalization::impl::sync(grads); |
19619 | grads_ = at::functionalization::impl::from_functional_tensor(grads); |
19620 | } else { |
19621 | grads_ = grads.vec(); |
19622 | } |
19623 | |
19624 | ::std::vector<at::Tensor> exp_avgs_; |
19625 | if (at::functionalization::impl::isFunctionalTensor(exp_avgs)) { |
19626 | at::functionalization::impl::sync(exp_avgs); |
19627 | exp_avgs_ = at::functionalization::impl::from_functional_tensor(exp_avgs); |
19628 | } else { |
19629 | exp_avgs_ = exp_avgs.vec(); |
19630 | } |
19631 | |
19632 | ::std::vector<at::Tensor> exp_avg_sqs_; |
19633 | if (at::functionalization::impl::isFunctionalTensor(exp_avg_sqs)) { |
19634 | at::functionalization::impl::sync(exp_avg_sqs); |
19635 | exp_avg_sqs_ = at::functionalization::impl::from_functional_tensor(exp_avg_sqs); |
19636 | } else { |
19637 | exp_avg_sqs_ = exp_avg_sqs.vec(); |
19638 | } |
19639 | |
19640 | ::std::vector<at::Tensor> max_exp_avg_sqs_; |
19641 | if (at::functionalization::impl::isFunctionalTensor(max_exp_avg_sqs)) { |
19642 | at::functionalization::impl::sync(max_exp_avg_sqs); |
19643 | max_exp_avg_sqs_ = at::functionalization::impl::from_functional_tensor(max_exp_avg_sqs); |
19644 | } else { |
19645 | max_exp_avg_sqs_ = max_exp_avg_sqs.vec(); |
19646 | } |
19647 | |
19648 | ::std::vector<at::Tensor> state_steps_; |
19649 | if (at::functionalization::impl::isFunctionalTensor(state_steps)) { |
19650 | at::functionalization::impl::sync(state_steps); |
19651 | state_steps_ = at::functionalization::impl::from_functional_tensor(state_steps); |
19652 | } else { |
19653 | state_steps_ = state_steps.vec(); |
19654 | } |
19655 | |
19656 | c10::optional<at::Tensor> grad_scale_; |
19657 | if (at::functionalization::impl::isFunctionalTensor(grad_scale)) { |
19658 | at::functionalization::impl::sync(grad_scale); |
19659 | grad_scale_ = at::functionalization::impl::from_functional_tensor(grad_scale); |
19660 | } else { |
19661 | grad_scale_ = grad_scale; |
19662 | } |
19663 | |
19664 | c10::optional<at::Tensor> found_inf_; |
19665 | if (at::functionalization::impl::isFunctionalTensor(found_inf)) { |
19666 | at::functionalization::impl::sync(found_inf); |
19667 | found_inf_ = at::functionalization::impl::from_functional_tensor(found_inf); |
19668 | } else { |
19669 | found_inf_ = found_inf; |
19670 | } |
19671 | if (!(true && at::functionalization::impl::isFunctionalTensor(self) && at::functionalization::impl::isFunctionalTensor(grads) && at::functionalization::impl::isFunctionalTensor(exp_avgs) && at::functionalization::impl::isFunctionalTensor(exp_avg_sqs) && at::functionalization::impl::isFunctionalTensor(max_exp_avg_sqs))) { |
19672 | if ((false || at::functionalization::impl::isFunctionalTensor(state_steps) || at::functionalization::impl::isFunctionalTensor(grad_scale) || at::functionalization::impl::isFunctionalTensor(found_inf))) { |
19673 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
19674 | TORCH_INTERNAL_ASSERT(false, |
19675 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19676 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19677 | } else { |
19678 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19679 | at::AutoDispatchSkipFunctionalize guard; |
19680 | at::_ops::_fused_adam_::call(self_, grads_, exp_avgs_, exp_avg_sqs_, max_exp_avg_sqs_, state_steps_, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_, found_inf_); |
19682 | } |
19683 | } else { |
19684 | ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> tmp_output; |
19685 | { |
19686 | at::AutoDispatchSkipFunctionalize guard; |
19687 | tmp_output = at::_ops::_fused_adam::call(self_, grads_, exp_avgs_, exp_avg_sqs_, max_exp_avg_sqs_, state_steps_, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_, found_inf_); |
19688 | } |
19689 | at::functionalization::impl::replace_(self, std::get<0>(tmp_output)); |
19690 | at::functionalization::impl::commit_update(self); |
19691 | at::functionalization::impl::sync(self); |
19692 | at::functionalization::impl::replace_(grads, std::get<1>(tmp_output)); |
19693 | at::functionalization::impl::commit_update(grads); |
19694 | at::functionalization::impl::sync(grads); |
19695 | at::functionalization::impl::replace_(exp_avgs, std::get<2>(tmp_output)); |
19696 | at::functionalization::impl::commit_update(exp_avgs); |
19697 | at::functionalization::impl::sync(exp_avgs); |
19698 | at::functionalization::impl::replace_(exp_avg_sqs, std::get<3>(tmp_output)); |
19699 | at::functionalization::impl::commit_update(exp_avg_sqs); |
19700 | at::functionalization::impl::sync(exp_avg_sqs); |
19701 | at::functionalization::impl::replace_(max_exp_avg_sqs, std::get<4>(tmp_output)); |
19702 | at::functionalization::impl::commit_update(max_exp_avg_sqs); |
19703 | at::functionalization::impl::sync(max_exp_avg_sqs); |
19704 | |
19705 | } |
19706 | } |
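
// Unlike the out= kernels earlier in this section, whose meta pre-check is compiled out
// (`if (false)`), the true in-place `_fused_adam_` above runs its arguments through the op on
// meta tensors first (`if (true)`). Meta tensors carry shapes and dtypes but no data, so this is
// a cheap early shape check. A rough illustration of what the two guards accomplish:
//
//   at::AutoDispatchSkipFunctionalize func_guard;   // don't re-enter this functionalization pass
//   c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
//   at::_ops::_fused_adam_::call(self_meta, grads_meta, /* ... */);  // dispatches to the meta kernel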
19707 | |
19708 | at::Tensor _conj(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { |
19709 | |
19710 | at::Tensor self_; |
19711 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
19712 | |
19713 | self_ = at::functionalization::impl::from_functional_tensor(self); |
19714 | } else { |
19715 | self_ = self; |
19716 | } |
19717 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
19718 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
19719 | at::AutoDispatchSkipFunctionalize guard; |
19720 | return at::_ops::_conj::call(self_); |
19721 | } |
19722 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
19723 | auto compute_reference_meta = |
19724 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
19725 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
19726 | at::Tensor reference_tensor_output; |
19727 | if (compute_reference_meta) { |
19728 | auto self_meta = to_meta(self); |
19729 | at::AutoDispatchSkipFunctionalize func_guard; |
19730 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19731 | reference_tensor_output = at::_ops::_conj::call(self_meta); |
19732 | } |
19733 | at::Tensor tmp_output; |
19734 | { |
19735 | at::AutoDispatchSkipFunctionalize guard; |
19736 | if (reapply_views) { |
19737 | tmp_output = at::_ops::_conj::call(self_); |
19738 | } else { |
19739 | tmp_output = at::_ops::_conj_copy::call(self_); |
19740 | } |
19741 | } |
19742 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
19743 | [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
19744 | if (reapply_views) { |
19745 | return at::_ops::_conj::call(base); |
19746 | } else { |
19747 | return at::_ops::_conj_copy::call(base); |
19748 | } |
19749 | }, |
19750 | [reapply_views = reapply_views](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
19751 | return at::functionalization::FunctionalInverses::_conj_copy_inverse(base, mutated_view, reapply_views); |
19752 | } |
19753 | ); |
19754 | auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); |
19755 | // See Note [Propagating strides in the functionalization pass] |
19756 | if (compute_reference_meta) { |
19757 | at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); |
19758 | } |
19759 | return out; |
19760 | } |
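
// View ops such as _conj above take a different path from mutations: instead of committing a
// result into an existing wrapper, the kernel builds an at::functionalization::ViewMeta whose
// first lambda replays the view off a base tensor and whose second lambda inverts it (here via
// FunctionalInverses::_conj_copy_inverse). Roughly, assuming `self` is functional:
//
//   // compute the view on the unwrapped tensor; use the *_copy variant when views are not reapplied
//   at::Tensor tmp = reapply_views ? at::_ops::_conj::call(self_)
//                                  : at::_ops::_conj_copy::call(self_);
//   // wrap it together with the replay/inverse lambdas so later mutations of the view
//   // can be propagated back to `self`
//   auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp, self, view_meta);
//
// as_strided and _sparse_broadcast_to below reuse this pattern unchanged.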
19761 | |
19762 | at::Tensor as_strided(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) { |
19763 | |
19764 | at::Tensor self_; |
19765 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
19766 | |
19767 | self_ = at::functionalization::impl::from_functional_tensor(self); |
19768 | } else { |
19769 | self_ = self; |
19770 | } |
19771 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
19772 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
19773 | at::AutoDispatchSkipFunctionalize guard; |
19774 | return at::_ops::as_strided::call(self_, size, stride, storage_offset); |
19775 | } |
19776 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
19777 | auto compute_reference_meta = |
19778 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
19779 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
19780 | at::Tensor reference_tensor_output; |
19781 | if (compute_reference_meta) { |
19782 | auto self_meta = to_meta(self); |
19783 | at::AutoDispatchSkipFunctionalize func_guard; |
19784 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19785 | reference_tensor_output = at::_ops::as_strided::call(self_meta, size, stride, storage_offset); |
19786 | } |
19787 | at::Tensor tmp_output; |
19788 | { |
19789 | at::AutoDispatchSkipFunctionalize guard; |
19790 | if (reapply_views) { |
19791 | tmp_output = at::_ops::as_strided::call(self_, size, stride, storage_offset); |
19792 | } else { |
19793 | tmp_output = at::_ops::as_strided_copy::call(self_, size, stride, storage_offset); |
19794 | } |
19795 | } |
19796 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
19797 | [reapply_views = reapply_views, size = size.vec(), stride = stride.vec(), storage_offset = storage_offset](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
19798 | if (reapply_views) { |
19799 | return at::_ops::as_strided::call(base, size, stride, storage_offset); |
19800 | } else { |
19801 | return at::_ops::as_strided_copy::call(base, size, stride, storage_offset); |
19802 | } |
19803 | }, |
19804 | [reapply_views = reapply_views, size = size.vec(), stride = stride.vec(), storage_offset = storage_offset](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
19805 | return at::functionalization::FunctionalInverses::as_strided_copy_inverse(base, mutated_view, reapply_views, size, stride, storage_offset); |
19806 | } |
19807 | ); |
19808 | auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); |
19809 | // See Note [Propagating strides in the functionalization pass] |
19810 | if (compute_reference_meta) { |
19811 | at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); |
19812 | } |
19813 | return out; |
19814 | } |
19815 | |
19816 | const at::Tensor & as_strided_(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) { |
19817 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
19818 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
19819 | |
19820 | at::Tensor self_; |
19821 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
19822 | |
19823 | self_ = at::functionalization::impl::from_functional_tensor(self); |
19824 | } else { |
19825 | self_ = self; |
19826 | } |
19827 | at::AutoDispatchSkipFunctionalize guard; |
19828 | return at::_ops::as_strided_::call(self_, size, stride, storage_offset); |
19829 | } |
19830 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
19831 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
19832 | [reapply_views = reapply_views, size = size.vec(), stride = stride.vec(), storage_offset = storage_offset](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
19833 | if (reapply_views) { |
19834 | return at::_ops::as_strided::call(base, size, stride, storage_offset); |
19835 | } else { |
19836 | return at::_ops::as_strided_copy::call(base, size, stride, storage_offset); |
19837 | } |
19838 | }, |
19839 | [reapply_views = reapply_views, size = size.vec(), stride = stride.vec(), storage_offset = storage_offset](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
19840 | return at::functionalization::FunctionalInverses::as_strided_copy_inverse(base, mutated_view, reapply_views, size, stride, storage_offset); |
19841 | } |
19842 | ); |
19843 | auto compute_reference_meta = |
19844 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
19845 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
19846 | at::Tensor reference_tensor_output; |
19847 | if (compute_reference_meta) { |
19848 | auto self_meta = to_meta(self); |
19849 | at::AutoDispatchSkipFunctionalize func_guard; |
19850 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19851 | reference_tensor_output = at::_ops::as_strided_::call(self_meta, size, stride, storage_offset); |
19852 | } |
19853 | // This function adds the above view meta to the current tensor and replays it off the base, |
19854 | // mutating the size/stride info of the current FunctionalTensorWrapper. |
19855 | // Because of this, we need to make sure to run the reference shape function above, |
19856 | // BEFORE doing this (otherwise we'll end up running the reference function using the wrong sizes/strides) |
19857 | at::functionalization::impl::mutate_view_meta(self, view_meta); |
19858 | // See Note [Propagating strides in the functionalization pass] |
19859 | // XLA/LTC don't implement the logic to propagate strides correctly, so we need to rely |
19860 | // on a reference implementation here (instead of relying on the output from the forward lambda |
19861 | // having the correct stride info) |
19862 | if (compute_reference_meta) { |
19863 | at::functionalization::impl::set_sizes_strides_offset(self, reference_tensor_output); |
19864 | } |
19865 | return self; |
19866 | } |
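
// In-place view ops like as_strided_ above do not produce a new wrapper; mutate_view_meta()
// appends the ViewMeta to `self`'s existing view chain and updates its size/stride metadata
// in place. A small sketch of the call pattern being handled (illustrative only):
//
//   at::Tensor t = at::arange(6);
//   t.as_strided_({2, 3}, {3, 1});  // `t` itself now presents the 2x3 view of its storage
//   // Under functionalization this reshapes the FunctionalTensorWrapper rather than creating
//   // a second tensor, which is why the reference meta shapes are computed before the mutation.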
19867 | |
19868 | at::Tensor _sparse_broadcast_to(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size) { |
19869 | |
19870 | at::Tensor self_; |
19871 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
19872 | |
19873 | self_ = at::functionalization::impl::from_functional_tensor(self); |
19874 | } else { |
19875 | self_ = self; |
19876 | } |
19877 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
19878 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
19879 | at::AutoDispatchSkipFunctionalize guard; |
19880 | return at::_ops::_sparse_broadcast_to::call(self_, size); |
19881 | } |
19882 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
19883 | auto compute_reference_meta = |
19884 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
19885 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
19886 | at::Tensor reference_tensor_output; |
19887 | if (compute_reference_meta) { |
19888 | auto self_meta = to_meta(self); |
19889 | at::AutoDispatchSkipFunctionalize func_guard; |
19890 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19891 | reference_tensor_output = at::_ops::_sparse_broadcast_to::call(self_meta, size); |
19892 | } |
19893 | at::Tensor tmp_output; |
19894 | { |
19895 | at::AutoDispatchSkipFunctionalize guard; |
19896 | if (reapply_views) { |
19897 | tmp_output = at::_ops::_sparse_broadcast_to::call(self_, size); |
19898 | } else { |
19899 | tmp_output = at::_ops::_sparse_broadcast_to_copy::call(self_, size); |
19900 | } |
19901 | } |
19902 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
19903 | [reapply_views = reapply_views, size = size.vec()](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
19904 | if (reapply_views) { |
19905 | return at::_ops::_sparse_broadcast_to::call(base, size); |
19906 | } else { |
19907 | return at::_ops::_sparse_broadcast_to_copy::call(base, size); |
19908 | } |
19909 | }, |
19910 | [reapply_views = reapply_views, size = size.vec()](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
19911 | return at::functionalization::FunctionalInverses::_sparse_broadcast_to_copy_inverse(base, mutated_view, reapply_views, size); |
19912 | } |
19913 | ); |
19914 | auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); |
19915 | // See Note [Propagating strides in the functionalization pass] |
19916 | if (compute_reference_meta) { |
19917 | at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); |
19918 | } |
19919 | return out; |
19920 | } |
19921 | |
19922 | at::Tensor permute(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dims) { |
19923 | |
19924 | at::Tensor self_; |
19925 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
19926 | |
19927 | self_ = at::functionalization::impl::from_functional_tensor(self); |
19928 | } else { |
19929 | self_ = self; |
19930 | } |
19931 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
19932 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
19933 | at::AutoDispatchSkipFunctionalize guard; |
19934 | return at::_ops::permute::call(self_, dims); |
19935 | } |
19936 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
19937 | auto compute_reference_meta = |
19938 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
19939 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
19940 | at::Tensor reference_tensor_output; |
19941 | if (compute_reference_meta) { |
19942 | auto self_meta = to_meta(self); |
19943 | at::AutoDispatchSkipFunctionalize func_guard; |
19944 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19945 | reference_tensor_output = at::_ops::permute::call(self_meta, dims); |
19946 | } |
19947 | at::Tensor tmp_output; |
19948 | { |
19949 | at::AutoDispatchSkipFunctionalize guard; |
19950 | if (reapply_views) { |
19951 | tmp_output = at::_ops::permute::call(self_, dims); |
19952 | } else { |
19953 | tmp_output = at::_ops::permute_copy::call(self_, dims); |
19954 | } |
19955 | } |
19956 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
19957 | [reapply_views = reapply_views, dims = dims.vec()](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
19958 | if (reapply_views) { |
19959 | return at::_ops::permute::call(base, dims); |
19960 | } else { |
19961 | return at::_ops::permute_copy::call(base, dims); |
19962 | } |
19963 | }, |
19964 | [reapply_views = reapply_views, dims = dims.vec()](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
19965 | return at::functionalization::FunctionalInverses::permute_copy_inverse(base, mutated_view, reapply_views, dims); |
19966 | } |
19967 | ); |
19968 | auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); |
19969 | // See Note [Propagating strides in the functionalization pass] |
19970 | if (compute_reference_meta) { |
19971 | at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); |
19972 | } |
19973 | return out; |
19974 | } |
19975 | |
19976 | at::Tensor _reshape_alias(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) { |
19977 | |
19978 | at::Tensor self_; |
19979 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
19980 | |
19981 | self_ = at::functionalization::impl::from_functional_tensor(self); |
19982 | } else { |
19983 | self_ = self; |
19984 | } |
19985 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
19986 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
19987 | at::AutoDispatchSkipFunctionalize guard; |
19988 | return at::_ops::_reshape_alias::call(self_, size, stride); |
19989 | } |
19990 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
19991 | auto compute_reference_meta = |
19992 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
19993 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
19994 | at::Tensor reference_tensor_output; |
19995 | if (compute_reference_meta) { |
19996 | auto self_meta = to_meta(self); |
19997 | at::AutoDispatchSkipFunctionalize func_guard; |
19998 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19999 | reference_tensor_output = at::_ops::_reshape_alias::call(self_meta, size, stride); |
20000 | } |
20001 | at::Tensor tmp_output; |
20002 | { |
20003 | at::AutoDispatchSkipFunctionalize guard; |
20004 | if (reapply_views) { |
20005 | tmp_output = at::_ops::_reshape_alias::call(self_, size, stride); |
20006 | } else { |
20007 | tmp_output = at::_ops::_reshape_alias_copy::call(self_, size, stride); |
20008 | } |
20009 | } |
20010 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
20011 | [reapply_views = reapply_views, size = size.vec(), stride = stride.vec()](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
20012 | if (reapply_views) { |
20013 | return at::_ops::_reshape_alias::call(base, size, stride); |
20014 | } else { |
20015 | return at::_ops::_reshape_alias_copy::call(base, size, stride); |
20016 | } |
20017 | }, |
20018 | [reapply_views = reapply_views, size = size.vec(), stride = stride.vec()](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
20019 | return at::functionalization::FunctionalInverses::_reshape_alias_copy_inverse(base, mutated_view, reapply_views, size, stride); |
20020 | } |
20021 | ); |
20022 | auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); |
20023 | // See Note [Propagating strides in the functionalization pass] |
20024 | if (compute_reference_meta) { |
20025 | at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); |
20026 | } |
20027 | return out; |
20028 | } |
20029 | |
20030 | at::Tensor detach(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { |
20031 | |
20032 | at::Tensor self_; |
20033 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20034 | |
20035 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20036 | } else { |
20037 | self_ = self; |
20038 | } |
20039 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
20040 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
20041 | at::AutoDispatchSkipFunctionalize guard; |
20042 | return at::_ops::detach::call(self_); |
20043 | } |
20044 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
20045 | auto compute_reference_meta = |
20046 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
20047 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
20048 | at::Tensor reference_tensor_output; |
20049 | if (compute_reference_meta) { |
20050 | auto self_meta = to_meta(self); |
20051 | at::AutoDispatchSkipFunctionalize func_guard; |
20052 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20053 | reference_tensor_output = at::_ops::detach::call(self_meta); |
20054 | } |
20055 | at::Tensor tmp_output; |
20056 | { |
20057 | at::AutoDispatchSkipFunctionalize guard; |
20058 | if (reapply_views) { |
20059 | tmp_output = at::_ops::detach::call(self_); |
20060 | } else { |
20061 | tmp_output = at::_ops::detach_copy::call(self_); |
20062 | } |
20063 | } |
20064 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
20065 | [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
20066 | if (reapply_views) { |
20067 | return at::_ops::detach::call(base); |
20068 | } else { |
20069 | return at::_ops::detach_copy::call(base); |
20070 | } |
20071 | }, |
20072 | [reapply_views = reapply_views](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
20073 | return at::functionalization::FunctionalInverses::detach_copy_inverse(base, mutated_view, reapply_views); |
20074 | } |
20075 | ); |
20076 | auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); |
20077 | // See Note [Propagating strides in the functionalization pass] |
20078 | if (compute_reference_meta) { |
20079 | at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); |
20080 | } |
20081 | return out; |
20082 | } |
20083 | |
20084 | at::Tensor & detach_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
20085 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
20086 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
20087 | |
20088 | at::Tensor self_; |
20089 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20090 | |
20091 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20092 | } else { |
20093 | self_ = self; |
20094 | } |
20095 | at::AutoDispatchSkipFunctionalize guard; |
20096 | return at::_ops::detach_::call(self_); |
20097 | } |
20098 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
20099 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
20100 | [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
20101 | if (reapply_views) { |
20102 | return at::_ops::detach::call(base); |
20103 | } else { |
20104 | return at::_ops::detach_copy::call(base); |
20105 | } |
20106 | }, |
20107 | [reapply_views = reapply_views](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
20108 | return at::functionalization::FunctionalInverses::detach_copy_inverse(base, mutated_view, reapply_views); |
20109 | } |
20110 | ); |
20111 | auto compute_reference_meta = |
20112 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
20113 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
20114 | at::Tensor reference_tensor_output; |
20115 | if (compute_reference_meta) { |
20116 | auto self_meta = to_meta(self); |
20117 | at::AutoDispatchSkipFunctionalize func_guard; |
20118 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20119 | reference_tensor_output = at::_ops::detach_::call(self_meta); |
20120 | } |
20121 | // This function adds the above view meta to the current tensor and replays it off the base, |
20122 | // mutating the size/stride info of the current FunctionalTensorWrapper. |
20123 | // Because of this, we need to make sure to run the reference shape function above, |
20124 | // BEFORE doing this (otherwise we'll end up running the reference function using the wrong sizes/strides) |
20125 | at::functionalization::impl::mutate_view_meta(self, view_meta); |
20126 | // See Note [Propagating strides in the functionalization pass] |
20127 | // XLA/LTC don't implement the logic to propagate strides correctly, so we need to rely |
20128 | // on a reference implementation here (instead of relying on the output from the forward lambda |
20129 | // having the correct stride info) |
20130 | if (compute_reference_meta) { |
20131 | at::functionalization::impl::set_sizes_strides_offset(self, reference_tensor_output); |
20132 | } |
20133 | return self; |
20134 | } |
20135 | |
20136 | ::std::vector<at::Tensor> split_Tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt split_size, int64_t dim) { |
20137 | |
20138 | at::Tensor self_; |
20139 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20140 | |
20141 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20142 | } else { |
20143 | self_ = self; |
20144 | } |
20145 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
20146 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
20147 | at::AutoDispatchSkipFunctionalize guard; |
20148 | return at::_ops::split_Tensor::call(self_, split_size, dim); |
20149 | } |
20150 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
20151 | auto compute_reference_meta = |
20152 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
20153 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
20154 | ::std::vector<at::Tensor> reference_tensor_output; |
20155 | if (compute_reference_meta) { |
20156 | auto self_meta = to_meta(self); |
20157 | at::AutoDispatchSkipFunctionalize func_guard; |
20158 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20159 | reference_tensor_output = at::_ops::split_Tensor::call(self_meta, split_size, dim); |
20160 | } |
20161 | ::std::vector<at::Tensor> tmp_output; |
20162 | { |
20163 | at::AutoDispatchSkipFunctionalize guard; |
20164 | if (reapply_views) { |
20165 | tmp_output = at::_ops::split_Tensor::call(self_, split_size, dim); |
20166 | } else { |
20167 | tmp_output = at::_ops::split_copy_Tensor::call(self_, split_size, dim); |
20168 | } |
20169 | } |
20170 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
20171 | [reapply_views = reapply_views, split_size = split_size, dim = dim](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
20172 | if (reapply_views) { |
20173 | return at::_ops::split_Tensor::call(base, split_size, dim)[mutated_view_idx]; |
20174 | } else { |
20175 | return at::_ops::split_copy_Tensor::call(base, split_size, dim)[mutated_view_idx]; |
20176 | } |
20177 | }, |
20178 | [reapply_views = reapply_views, split_size = split_size, dim = dim](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
20179 | return at::functionalization::FunctionalInverses::split_copy_Tensor_inverse(base, mutated_view, reapply_views, mutated_view_idx, split_size, dim); |
20180 | } |
20181 | ); |
20182 | auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); |
20183 | // See Note [Propagating strides in the functionalization pass] |
20184 | if (compute_reference_meta) { |
20185 | at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); |
20186 | } |
20187 | return out; |
20188 | } |
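
// Multi-output view ops such as split share the ViewMeta above across their outputs;
// mutated_view_idx identifies which output a replay or inverse applies to.
// Illustrative sketch (not part of the generated kernels):
//
//   at::Tensor base = at::arange(6);
//   auto chunks = base.split(2, /*dim=*/0);  // three length-2 views of `base`
//   chunks[1].mul_(2);                       // under functionalization, split_copy_Tensor_inverse
//                                            // writes this update back into elements 2..3 of `base`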
20189 | |
20190 | at::Tensor values(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { |
20191 | |
20192 | at::Tensor self_; |
20193 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20194 | |
20195 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20196 | } else { |
20197 | self_ = self; |
20198 | } |
20199 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
20200 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
20201 | at::AutoDispatchSkipFunctionalize guard; |
20202 | return at::_ops::values::call(self_); |
20203 | } |
20204 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
20205 | auto compute_reference_meta = |
20206 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
20207 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
20208 | at::Tensor reference_tensor_output; |
20209 | if (compute_reference_meta) { |
20210 | auto self_meta = to_meta(self); |
20211 | at::AutoDispatchSkipFunctionalize func_guard; |
20212 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20213 | reference_tensor_output = at::_ops::values::call(self_meta); |
20214 | } |
20215 | at::Tensor tmp_output; |
20216 | { |
20217 | at::AutoDispatchSkipFunctionalize guard; |
20218 | if (reapply_views) { |
20219 | tmp_output = at::_ops::values::call(self_); |
20220 | } else { |
20221 | tmp_output = at::_ops::values_copy::call(self_); |
20222 | } |
20223 | } |
20224 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
20225 | [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
20226 | if (reapply_views) { |
20227 | return at::_ops::values::call(base); |
20228 | } else { |
20229 | return at::_ops::values_copy::call(base); |
20230 | } |
20231 | }, |
20232 | [reapply_views = reapply_views](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
20233 | return at::functionalization::FunctionalInverses::values_copy_inverse(base, mutated_view, reapply_views); |
20234 | } |
20235 | ); |
20236 | auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); |
20237 | // See Note [Propagating strides in the functionalization pass] |
20238 | if (compute_reference_meta) { |
20239 | at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); |
20240 | } |
20241 | return out; |
20242 | } |
20243 | |
20244 | at::Tensor row_indices(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { |
20245 | |
20246 | at::Tensor self_; |
20247 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20248 | |
20249 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20250 | } else { |
20251 | self_ = self; |
20252 | } |
20253 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
20254 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
20255 | at::AutoDispatchSkipFunctionalize guard; |
20256 | return at::_ops::row_indices::call(self_); |
20257 | } |
20258 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
20259 | auto compute_reference_meta = |
20260 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
20261 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
20262 | at::Tensor reference_tensor_output; |
20263 | if (compute_reference_meta) { |
20264 | auto self_meta = to_meta(self); |
20265 | at::AutoDispatchSkipFunctionalize func_guard; |
20266 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20267 | reference_tensor_output = at::_ops::row_indices::call(self_meta); |
20268 | } |
20269 | at::Tensor tmp_output; |
20270 | { |
20271 | at::AutoDispatchSkipFunctionalize guard; |
20272 | if (reapply_views) { |
20273 | tmp_output = at::_ops::row_indices::call(self_); |
20274 | } else { |
20275 | tmp_output = at::_ops::row_indices_copy::call(self_); |
20276 | } |
20277 | } |
20278 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
20279 | [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
20280 | if (reapply_views) { |
20281 | return at::_ops::row_indices::call(base); |
20282 | } else { |
20283 | return at::_ops::row_indices_copy::call(base); |
20284 | } |
20285 | }, |
20286 | [reapply_views = reapply_views](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
20287 | return at::functionalization::FunctionalInverses::row_indices_copy_inverse(base, mutated_view, reapply_views); |
20288 | } |
20289 | ); |
20290 | auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); |
20291 | // See Note [Propagating strides in the functionalization pass] |
20292 | if (compute_reference_meta) { |
20293 | at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); |
20294 | } |
20295 | return out; |
20296 | } |
20297 | |
20298 | at::Tensor lift_fresh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { |
20299 | |
20300 | at::Tensor self_; |
20301 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20302 | |
20303 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20304 | } else { |
20305 | self_ = self; |
20306 | } |
20307 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
20308 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
20309 | at::AutoDispatchSkipFunctionalize guard; |
20310 | return at::_ops::lift_fresh::call(self_); |
20311 | } |
20312 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
20313 | auto compute_reference_meta = |
20314 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
20315 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
20316 | at::Tensor reference_tensor_output; |
20317 | if (compute_reference_meta) { |
20318 | auto self_meta = to_meta(self); |
20319 | at::AutoDispatchSkipFunctionalize func_guard; |
20320 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20321 | reference_tensor_output = at::_ops::lift_fresh::call(self_meta); |
20322 | } |
20323 | at::Tensor tmp_output; |
20324 | { |
20325 | at::AutoDispatchSkipFunctionalize guard; |
20326 | if (reapply_views) { |
20327 | tmp_output = at::_ops::lift_fresh::call(self_); |
20328 | } else { |
20329 | tmp_output = at::_ops::lift_fresh_copy::call(self_); |
20330 | } |
20331 | } |
20332 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
20333 | [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
20334 | if (reapply_views) { |
20335 | return at::_ops::lift_fresh::call(base); |
20336 | } else { |
20337 | return at::_ops::lift_fresh_copy::call(base); |
20338 | } |
20339 | }, |
20340 | [reapply_views = reapply_views](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
20341 | return at::functionalization::FunctionalInverses::lift_fresh_copy_inverse(base, mutated_view, reapply_views); |
20342 | } |
20343 | ); |
20344 | auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); |
20345 | // See Note [Propagating strides in the functionalization pass] |
20346 | if (compute_reference_meta) { |
20347 | at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); |
20348 | } |
20349 | return out; |
20350 | } |
20351 | |
20352 | at::Tensor _test_autograd_multiple_dispatch_view(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { |
20353 | |
20354 | at::Tensor self_; |
20355 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20356 | |
20357 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20358 | } else { |
20359 | self_ = self; |
20360 | } |
20361 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
20362 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
20363 | at::AutoDispatchSkipFunctionalize guard; |
20364 | return at::_ops::_test_autograd_multiple_dispatch_view::call(self_); |
20365 | } |
20366 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
20367 | auto compute_reference_meta = |
20368 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
20369 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
20370 | at::Tensor reference_tensor_output; |
20371 | if (compute_reference_meta) { |
20372 | auto self_meta = to_meta(self); |
20373 | at::AutoDispatchSkipFunctionalize func_guard; |
20374 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20375 | reference_tensor_output = at::_ops::_test_autograd_multiple_dispatch_view::call(self_meta); |
20376 | } |
20377 | at::Tensor tmp_output; |
20378 | { |
20379 | at::AutoDispatchSkipFunctionalize guard; |
20380 | if (reapply_views) { |
20381 | tmp_output = at::_ops::_test_autograd_multiple_dispatch_view::call(self_); |
20382 | } else { |
20383 | tmp_output = at::_ops::_test_autograd_multiple_dispatch_view_copy::call(self_); |
20384 | } |
20385 | } |
20386 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
20387 | [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
20388 | if (reapply_views) { |
20389 | return at::_ops::_test_autograd_multiple_dispatch_view::call(base); |
20390 | } else { |
20391 | return at::_ops::_test_autograd_multiple_dispatch_view_copy::call(base); |
20392 | } |
20393 | }, |
20394 | [reapply_views = reapply_views](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
20395 | return at::functionalization::FunctionalInverses::_test_autograd_multiple_dispatch_view_copy_inverse(base, mutated_view, reapply_views); |
20396 | } |
20397 | ); |
20398 | auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); |
20399 | // See Note [Propagating strides in the functionalization pass] |
20400 | if (compute_reference_meta) { |
20401 | at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); |
20402 | } |
20403 | return out; |
20404 | } |
20405 | |
20406 | } // namespace functionalization |
20407 | |
20408 | namespace { |
20409 | |
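// The block below binds each functionalization kernel (or a composite at::native fallback) to its
// ATen operator under the Functionalize dispatch key. The same registration pattern applies to
// out-of-tree operators; a minimal sketch with hypothetical names ("myns::my_op" and
// my_op_functionalize are illustrative, not part of this file):
//
//   TORCH_LIBRARY_IMPL(myns, Functionalize, m) {
//     m.impl("my_op", TORCH_FN(my_op_functionalize));
//   }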
20410 | TORCH_LIBRARY_IMPL(aten, Functionalize, m) { |
20411 | m.impl("_cudnn_ctc_loss.out" , TORCH_FN(functionalization::_cudnn_ctc_loss_out_out)); |
20412 | m.impl("_cudnn_rnn.out" , TORCH_FN(functionalization::_cudnn_rnn_out_out)); |
20413 | m.impl("_cudnn_rnn_backward.out" , TORCH_FN(functionalization::_cudnn_rnn_backward_out_out)); |
20414 | m.impl("_fused_dropout.out" , TORCH_FN(functionalization::_fused_dropout_out_out)); |
20415 | m.impl("conj_physical" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::conj_physical)); |
20416 | m.impl("conj_physical.out" , TORCH_FN(functionalization::conj_physical_out_out)); |
20417 | m.impl("conj_physical_" , TORCH_FN(functionalization::conj_physical_)); |
20418 | m.impl("add.out" , TORCH_FN(functionalization::add_out_out)); |
20419 | m.impl("add_.Tensor" , TORCH_FN(functionalization::add__Tensor)); |
20420 | m.impl("add.Scalar_out" , TORCH_FN(functionalization::add_out_Scalar_out)); |
20421 | m.impl("add_.Scalar" , TORCH_FN(functionalization::add__Scalar)); |
20422 | m.impl("addmv.out" , TORCH_FN(functionalization::addmv_out_out)); |
20423 | m.impl("addmv_" , TORCH_FN(functionalization::addmv_)); |
20424 | m.impl("addr.out" , TORCH_FN(functionalization::addr_out_out)); |
20425 | m.impl("addr_" , TORCH_FN(functionalization::addr_)); |
20426 | m.impl("all.out" , TORCH_FN(functionalization::all_out_out)); |
20427 | m.impl("all.dimname" , static_cast<at::Tensor (*)(const at::Tensor & self, at::Dimname dim, bool keepdim)>(at::native::all)); |
20428 | m.impl("all.dimname_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out)>(at::native::all_out)); |
20429 | m.impl("argmax.out" , TORCH_FN(functionalization::argmax_out_out)); |
20430 | m.impl("atan.out" , TORCH_FN(functionalization::atan_out_out)); |
20431 | m.impl("atan_" , TORCH_FN(functionalization::atan_)); |
20432 | m.impl("bartlett_window.out" , TORCH_FN(functionalization::bartlett_window_out_out)); |
20433 | m.impl("bartlett_window.periodic_out" , TORCH_FN(functionalization::bartlett_window_out_periodic_out)); |
20434 | m.impl("binary_cross_entropy.out" , TORCH_FN(functionalization::binary_cross_entropy_out_out)); |
20435 | m.impl("bitwise_not.out" , TORCH_FN(functionalization::bitwise_not_out_out)); |
20436 | m.impl("bitwise_not_" , TORCH_FN(functionalization::bitwise_not_)); |
20437 | m.impl("logical_xor.out" , TORCH_FN(functionalization::logical_xor_out_out)); |
20438 | m.impl("logical_xor_" , TORCH_FN(functionalization::logical_xor_)); |
20439 | m.impl("blackman_window.out" , TORCH_FN(functionalization::blackman_window_out_out)); |
20440 | m.impl("blackman_window.periodic_out" , TORCH_FN(functionalization::blackman_window_out_periodic_out)); |
20441 | m.impl("clamp.out" , TORCH_FN(functionalization::clamp_out_out)); |
20442 | m.impl("clamp_" , TORCH_FN(functionalization::clamp_)); |
20443 | m.impl("clamp.Tensor_out" , TORCH_FN(functionalization::clamp_out_Tensor_out)); |
20444 | m.impl("clamp_.Tensor" , TORCH_FN(functionalization::clamp__Tensor)); |
20445 | m.impl("_convolution.out" , TORCH_FN(functionalization::_convolution_out_out)); |
20446 | m.impl("copy.out" , TORCH_FN(functionalization::copy_out_out)); |
20447 | m.impl("copy_" , TORCH_FN(functionalization::copy_)); |
20448 | m.impl("cudnn_convolution.out" , TORCH_FN(functionalization::cudnn_convolution_out_out)); |
20449 | m.impl("divide.Tensor" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other)>(at::native::divide)); |
20450 | m.impl("divide.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & other, at::Tensor & out)>(at::native::divide_out)); |
20451 | m.impl("divide_.Tensor" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Tensor & other)>(at::native::divide_)); |
20452 | m.impl("divide.Tensor_mode" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode)>(at::native::divide)); |
20453 | m.impl("divide.out_mode" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out)>(at::native::divide_out)); |
20454 | m.impl("divide_.Tensor_mode" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode)>(at::native::divide_)); |
20455 | m.impl("true_divide.Tensor" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other)>(at::native::true_divide)); |
20456 | m.impl("true_divide.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & other, at::Tensor & out)>(at::native::true_divide_out)); |
20457 | m.impl("true_divide_.Tensor" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Tensor & other)>(at::native::true_divide_)); |
20458 | m.impl("dot.out" , TORCH_FN(functionalization::dot_out_out)); |
20459 | m.impl("vdot.out" , TORCH_FN(functionalization::vdot_out_out)); |
20460 | m.impl("row_stack" , static_cast<at::Tensor (*)(at::TensorList tensors)>(at::native::row_stack)); |
20461 | m.impl("row_stack.out" , static_cast<at::Tensor & (*)(at::TensorList tensors, at::Tensor & out)>(at::native::row_stack_out)); |
20462 | m.impl("new_empty.out" , TORCH_FN(functionalization::new_empty_out_out)); |
20463 | m.impl("new_empty_strided.out" , TORCH_FN(functionalization::new_empty_strided_out_out)); |
20464 | m.impl("_empty_affine_quantized.out" , TORCH_FN(functionalization::_empty_affine_quantized_out_out)); |
20465 | m.impl("_resize_output.out" , TORCH_FN(functionalization::_resize_output_out_out)); |
20466 | m.impl("_resize_output_" , TORCH_FN(functionalization::_resize_output_)); |
20467 | m.impl("frac.out" , TORCH_FN(functionalization::frac_out_out)); |
20468 | m.impl("frac_" , TORCH_FN(functionalization::frac_)); |
20469 | m.impl("full_like.out" , TORCH_FN(functionalization::full_like_out_out)); |
20470 | m.impl("lcm.out" , TORCH_FN(functionalization::lcm_out_out)); |
20471 | m.impl("lcm_" , TORCH_FN(functionalization::lcm_)); |
20472 | m.impl("hann_window.out" , TORCH_FN(functionalization::hann_window_out_out)); |
20473 | m.impl("hann_window.periodic_out" , TORCH_FN(functionalization::hann_window_out_periodic_out)); |
20474 | m.impl("kaiser_window.out" , TORCH_FN(functionalization::kaiser_window_out_out)); |
20475 | m.impl("kaiser_window.periodic_out" , TORCH_FN(functionalization::kaiser_window_out_periodic_out)); |
20476 | m.impl("kaiser_window.beta_out" , TORCH_FN(functionalization::kaiser_window_out_beta_out)); |
20477 | m.impl("_index_put_impl.out" , TORCH_FN(functionalization::_index_put_impl_out_out)); |
20478 | m.impl("_index_put_impl_" , TORCH_FN(functionalization::_index_put_impl_)); |
20479 | m.impl("kthvalue.values" , TORCH_FN(functionalization::kthvalue_out_values)); |
20480 | m.impl("kthvalue.dimname" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim)>(at::native::kthvalue)); |
20481 | m.impl("kthvalue.dimname_out" , static_cast<::std::tuple<at::Tensor &,at::Tensor &> (*)(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices)>(at::native::kthvalue_out)); |
20482 | m.impl("native_layer_norm.out" , TORCH_FN(functionalization::native_layer_norm_out_out)); |
20483 | m.impl("native_layer_norm_backward.out" , TORCH_FN(functionalization::native_layer_norm_backward_out_out)); |
20484 | m.impl("mkldnn_linear_backward_input.out" , TORCH_FN(functionalization::mkldnn_linear_backward_input_out_out)); |
20485 | m.impl("mkldnn_linear_backward.out" , TORCH_FN(functionalization::mkldnn_linear_backward_out_out)); |
20486 | m.impl("log10.out" , TORCH_FN(functionalization::log10_out_out)); |
20487 | m.impl("log10_" , TORCH_FN(functionalization::log10_)); |
20488 | m.impl("log1p.out" , TORCH_FN(functionalization::log1p_out_out)); |
20489 | m.impl("log1p_" , TORCH_FN(functionalization::log1p_)); |
20490 | m.impl("logsumexp.out" , TORCH_FN(functionalization::logsumexp_out_out)); |
20491 | m.impl("logsumexp.names" , static_cast<at::Tensor (*)(const at::Tensor & self, at::DimnameList dim, bool keepdim)>(at::native::logsumexp)); |
20492 | m.impl("logsumexp.names_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::DimnameList dim, bool keepdim, at::Tensor & out)>(at::native::logsumexp_out)); |
20493 | m.impl("matmul" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other)>(at::native::matmul)); |
20494 | m.impl("matmul.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & other, at::Tensor & out)>(at::native::matmul_out)); |
20495 | m.impl("mkldnn_max_pool2d.out" , TORCH_FN(functionalization::mkldnn_max_pool2d_out_out)); |
20496 | m.impl("quantized_max_pool1d.out" , TORCH_FN(functionalization::quantized_max_pool1d_out_out)); |
20497 | m.impl("nanmean" , static_cast<at::Tensor (*)(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype)>(at::native::nanmean)); |
20498 | m.impl("nanmean.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out)>(at::native::nanmean_out)); |
20499 | m.impl("_mps_convolution.out" , TORCH_FN(functionalization::_mps_convolution_out_out)); |
20500 | m.impl("mkldnn_convolution.out" , TORCH_FN(functionalization::mkldnn_convolution_out_out)); |
20501 | m.impl("mkldnn_rnn_layer.out" , TORCH_FN(functionalization::mkldnn_rnn_layer_out_out)); |
20502 | m.impl("miopen_batch_norm.out" , TORCH_FN(functionalization::miopen_batch_norm_out_out)); |
20503 | m.impl("miopen_batch_norm_backward.out" , TORCH_FN(functionalization::miopen_batch_norm_backward_out_out)); |
20504 | m.impl("miopen_convolution_transpose.out" , TORCH_FN(functionalization::miopen_convolution_transpose_out_out)); |
20505 | m.impl("mm.out" , TORCH_FN(functionalization::mm_out_out)); |
20506 | m.impl("_sparse_sparse_matmul.out" , TORCH_FN(functionalization::_sparse_sparse_matmul_out_out)); |
20507 | m.impl("mul.out" , TORCH_FN(functionalization::mul_out_out)); |
20508 | m.impl("mul_.Tensor" , TORCH_FN(functionalization::mul__Tensor)); |
20509 | m.impl("mul.Scalar_out" , TORCH_FN(functionalization::mul_out_Scalar_out)); |
20510 | m.impl("mul_.Scalar" , TORCH_FN(functionalization::mul__Scalar)); |
20511 | m.impl("mvlgamma.out" , TORCH_FN(functionalization::mvlgamma_out_out)); |
20512 | m.impl("mvlgamma_" , TORCH_FN(functionalization::mvlgamma_)); |
20513 | m.impl("batch_norm_backward_reduce.out" , TORCH_FN(functionalization::batch_norm_backward_reduce_out_out)); |
20514 | m.impl("deg2rad.out" , TORCH_FN(functionalization::deg2rad_out_out)); |
20515 | m.impl("deg2rad_" , TORCH_FN(functionalization::deg2rad_)); |
20516 | m.impl("randint_like.out" , TORCH_FN(functionalization::randint_like_out_out)); |
20517 | m.impl("randint_like.low_dtype_out" , TORCH_FN(functionalization::randint_like_out_low_dtype_out)); |
20518 | m.impl("repeat.out" , TORCH_FN(functionalization::repeat_out_out)); |
20519 | m.impl("_mkldnn_reshape.out" , TORCH_FN(functionalization::_mkldnn_reshape_out_out)); |
20520 | m.impl("round.out" , TORCH_FN(functionalization::round_out_out)); |
20521 | m.impl("round_" , TORCH_FN(functionalization::round_)); |
20522 | m.impl("round.decimals_out" , TORCH_FN(functionalization::round_out_decimals_out)); |
20523 | m.impl("round_.decimals" , TORCH_FN(functionalization::round__decimals)); |
20524 | m.impl("rsqrt.out" , TORCH_FN(functionalization::rsqrt_out_out)); |
20525 | m.impl("rsqrt_" , TORCH_FN(functionalization::rsqrt_)); |
20526 | m.impl("celu.out" , TORCH_FN(functionalization::celu_out_out)); |
20527 | m.impl("celu_" , TORCH_FN(functionalization::celu_)); |
20528 | m.impl("sigmoid.out" , TORCH_FN(functionalization::sigmoid_out_out)); |
20529 | m.impl("sigmoid_" , TORCH_FN(functionalization::sigmoid_)); |
20530 | m.impl("sinc.out" , TORCH_FN(functionalization::sinc_out_out)); |
20531 | m.impl("sinc_" , TORCH_FN(functionalization::sinc_)); |
20532 | m.impl("sinh.out" , TORCH_FN(functionalization::sinh_out_out)); |
20533 | m.impl("sinh_" , TORCH_FN(functionalization::sinh_)); |
20534 | m.impl("slice_backward.out" , TORCH_FN(functionalization::slice_backward_out_out)); |
20535 | m.impl("as_strided_scatter.out" , TORCH_FN(functionalization::as_strided_scatter_out_out)); |
20536 | m.impl("std_mean.correction_out" , TORCH_FN(functionalization::std_mean_out_correction_out)); |
20537 | m.impl("_mkldnn_transpose.out" , TORCH_FN(functionalization::_mkldnn_transpose_out_out)); |
20538 | m.impl("_mkldnn_transpose_" , TORCH_FN(functionalization::_mkldnn_transpose_)); |
20539 | m.impl("flip.out" , TORCH_FN(functionalization::flip_out_out)); |
20540 | m.impl("_nested_tensor_from_mask.out" , TORCH_FN(functionalization::_nested_tensor_from_mask_out_out)); |
20541 | m.impl("_nested_from_padded_and_nested_example.out" , TORCH_FN(functionalization::_nested_from_padded_and_nested_example_out_out)); |
20542 | m.impl("unique_dim.out" , TORCH_FN(functionalization::unique_dim_out_out)); |
20543 | m.impl("_unsafe_view.out" , TORCH_FN(functionalization::_unsafe_view_out_out)); |
20544 | m.impl("var_mean.correction_out" , TORCH_FN(functionalization::var_mean_out_correction_out)); |
20545 | m.impl("zeros.names_out" , TORCH_FN(functionalization::zeros_out_names_out)); |
20546 | m.impl("zeros.out" , TORCH_FN(functionalization::zeros_out_out)); |
20547 | m.impl("zeros_like.out" , TORCH_FN(functionalization::zeros_like_out_out)); |
20548 | m.impl("_sparse_csr_prod.dim_dtype_out" , TORCH_FN(functionalization::_sparse_csr_prod_out_dim_dtype_out)); |
20549 | m.impl("_spdiags.out" , TORCH_FN(functionalization::_spdiags_out_out)); |
20550 | m.impl("rsub.Tensor_out" , TORCH_FN(functionalization::rsub_out_Tensor_out)); |
20551 | m.impl("rsub.Scalar_out" , TORCH_FN(functionalization::rsub_out_Scalar_out)); |
20552 | m.impl("_sparse_addmm.out" , TORCH_FN(functionalization::_sparse_addmm_out_out)); |
20553 | m.impl("sparse_coo_tensor.size_out" , TORCH_FN(functionalization::sparse_coo_tensor_out_size_out)); |
20554 | m.impl("sparse_resize.out" , TORCH_FN(functionalization::sparse_resize_out_out)); |
20555 | m.impl("sparse_resize_" , TORCH_FN(functionalization::sparse_resize_)); |
20556 | m.impl("sparse_mask.out" , TORCH_FN(functionalization::sparse_mask_out_out)); |
20557 | m.impl("_coalesce.out" , TORCH_FN(functionalization::_coalesce_out_out)); |
20558 | m.impl("dequantize.self_out" , TORCH_FN(functionalization::dequantize_out_self_out)); |
20559 | m.impl("dequantize.tensors_out" , TORCH_FN(functionalization::dequantize_out_tensors_out)); |
20560 | m.impl("q_per_channel_zero_points.out" , TORCH_FN(functionalization::q_per_channel_zero_points_out_out)); |
20561 | m.impl("_fake_quantize_learnable_per_channel_affine.out" , TORCH_FN(functionalization::_fake_quantize_learnable_per_channel_affine_out_out)); |
20562 | m.impl("_fused_moving_avg_obs_fq_helper.out" , TORCH_FN(functionalization::_fused_moving_avg_obs_fq_helper_out_out)); |
20563 | m.impl("_fused_moving_avg_obs_fq_helper" , TORCH_FN(functionalization::_fused_moving_avg_obs_fq_helper)); |
20564 | m.impl("_to_copy.out" , TORCH_FN(functionalization::_to_copy_out_out)); |
20565 | m.impl("_thnn_fused_gru_cell.out" , TORCH_FN(functionalization::_thnn_fused_gru_cell_out_out)); |
20566 | m.impl("_pack_padded_sequence.out" , TORCH_FN(functionalization::_pack_padded_sequence_out_out)); |
20567 | m.impl("scatter_reduce.two_out" , TORCH_FN(functionalization::scatter_reduce_out_two_out)); |
20568 | m.impl("scatter_reduce_.two" , TORCH_FN(functionalization::scatter_reduce__two)); |
20569 | m.impl("bitwise_xor.Tensor_out" , TORCH_FN(functionalization::bitwise_xor_out_Tensor_out)); |
20570 | m.impl("bitwise_xor_.Tensor" , TORCH_FN(functionalization::bitwise_xor__Tensor)); |
20571 | m.impl("bitwise_xor.Scalar" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Scalar & other)>(at::native::bitwise_xor)); |
20572 | m.impl("bitwise_xor.Scalar_out" , TORCH_FN(functionalization::bitwise_xor_out_Scalar_out)); |
20573 | m.impl("bitwise_xor_.Scalar" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Scalar & other)>(at::native::bitwise_xor_)); |
20574 | m.impl("bitwise_xor.Scalar_Tensor_out" , TORCH_FN(functionalization::bitwise_xor_out_Scalar_Tensor_out)); |
20575 | m.impl("addbmm.out" , TORCH_FN(functionalization::addbmm_out_out)); |
20576 | m.impl("addbmm_" , TORCH_FN(functionalization::addbmm_)); |
20577 | m.impl("random.from_out" , TORCH_FN(functionalization::random_out_from_out)); |
20578 | m.impl("random_.from" , TORCH_FN(functionalization::random__from)); |
20579 | m.impl("random.to_out" , TORCH_FN(functionalization::random_out_to_out)); |
20580 | m.impl("random_.to" , TORCH_FN(functionalization::random__to)); |
20581 | m.impl("random.out" , TORCH_FN(functionalization::random_out_out)); |
20582 | m.impl("random_" , TORCH_FN(functionalization::random_)); |
20583 | m.impl("exponential.out" , TORCH_FN(functionalization::exponential_out_out)); |
20584 | m.impl("exponential_" , TORCH_FN(functionalization::exponential_)); |
20585 | m.impl("geometric.out" , TORCH_FN(functionalization::geometric_out_out)); |
20586 | m.impl("geometric_" , TORCH_FN(functionalization::geometric_)); |
20587 | m.impl("cross" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim)>(at::native::cross)); |
20588 | m.impl("cross.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim, at::Tensor & out)>(at::native::cross_out)); |
20589 | m.impl("trace.out" , TORCH_FN(functionalization::trace_out_out)); |
20590 | m.impl("take_along_dim" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim)>(at::native::take_along_dim)); |
20591 | m.impl("take_along_dim.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim, at::Tensor & out)>(at::native::take_along_dim_out)); |
20592 | m.impl("index_select.out" , TORCH_FN(functionalization::index_select_out_out)); |
20593 | m.impl("index_select.dimname" , static_cast<at::Tensor (*)(const at::Tensor & self, at::Dimname dim, const at::Tensor & index)>(at::native::index_select)); |
20594 | m.impl("index_select.dimname_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, at::Tensor & out)>(at::native::index_select_out)); |
20595 | m.impl("masked_select.out" , TORCH_FN(functionalization::masked_select_out_out)); |
20596 | m.impl("linalg_solve_triangular.out" , TORCH_FN(functionalization::linalg_solve_triangular_out_out)); |
20597 | m.impl("svd" , static_cast<::std::tuple<at::Tensor,at::Tensor,at::Tensor> (*)(const at::Tensor & self, bool some, bool compute_uv)>(at::native::svd)); |
20598 | m.impl("svd.U" , static_cast<::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> (*)(const at::Tensor & self, bool some, bool compute_uv, at::Tensor & U, at::Tensor & S, at::Tensor & V)>(at::native::svd_out)); |
20599 | m.impl("multinomial.out" , TORCH_FN(functionalization::multinomial_out_out)); |
20600 | m.impl("histogram.bins_tensor_out" , TORCH_FN(functionalization::histogram_out_bins_tensor_out)); |
20601 | m.impl("histogram.bin_ct_out" , TORCH_FN(functionalization::histogram_out_bin_ct_out)); |
20602 | m.impl("igammac.out" , TORCH_FN(functionalization::igammac_out_out)); |
20603 | m.impl("igammac_" , TORCH_FN(functionalization::igammac_)); |
20604 | m.impl("remainder.Scalar_out" , TORCH_FN(functionalization::remainder_out_Scalar_out)); |
20605 | m.impl("remainder_.Scalar" , TORCH_FN(functionalization::remainder__Scalar)); |
20606 | m.impl("remainder.Tensor_out" , TORCH_FN(functionalization::remainder_out_Tensor_out)); |
20607 | m.impl("remainder_.Tensor" , TORCH_FN(functionalization::remainder__Tensor)); |
20608 | m.impl("remainder.Scalar_Tensor_out" , TORCH_FN(functionalization::remainder_out_Scalar_Tensor_out)); |
20609 | m.impl("quantile" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation)>(at::native::quantile)); |
20610 | m.impl("quantile.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out)>(at::native::quantile_out)); |
20611 | m.impl("quantile.scalar" , static_cast<at::Tensor (*)(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation)>(at::native::quantile)); |
20612 | m.impl("quantile.scalar_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out)>(at::native::quantile_out)); |
20613 | m.impl("nanquantile" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation)>(at::native::nanquantile)); |
20614 | m.impl("nanquantile.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out)>(at::native::nanquantile_out)); |
20615 | m.impl("nanquantile.scalar" , static_cast<at::Tensor (*)(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation)>(at::native::nanquantile)); |
20616 | m.impl("nanquantile.scalar_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out)>(at::native::nanquantile_out)); |
20617 | m.impl("sort.values" , TORCH_FN(functionalization::sort_out_values)); |
20618 | m.impl("sort.values_stable" , TORCH_FN(functionalization::sort_out_values_stable)); |
20619 | m.impl("sort.dimname" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & self, at::Dimname dim, bool descending)>(at::native::sort)); |
20620 | m.impl("sort.dimname_values" , static_cast<::std::tuple<at::Tensor &,at::Tensor &> (*)(const at::Tensor & self, at::Dimname dim, bool descending, at::Tensor & values, at::Tensor & indices)>(at::native::sort_out)); |
20621 | m.impl("sort.dimname_stable" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & self, c10::optional<bool> stable, at::Dimname dim, bool descending)>(at::native::sort)); |
20622 | m.impl("sort.dimname_values_stable" , static_cast<::std::tuple<at::Tensor &,at::Tensor &> (*)(const at::Tensor & self, c10::optional<bool> stable, at::Dimname dim, bool descending, at::Tensor & values, at::Tensor & indices)>(at::native::sort_out)); |
20623 | m.impl("argsort.stable_out" , TORCH_FN(functionalization::argsort_out_stable_out)); |
20624 | m.impl("all.all_out" , TORCH_FN(functionalization::all_out_all_out)); |
20625 | m.impl("renorm.out" , TORCH_FN(functionalization::renorm_out_out)); |
20626 | m.impl("renorm_" , TORCH_FN(functionalization::renorm_)); |
20627 | m.impl("unfold_backward.out" , TORCH_FN(functionalization::unfold_backward_out_out)); |
20628 | m.impl("pow.Tensor_Tensor_out" , TORCH_FN(functionalization::pow_out_Tensor_Tensor_out)); |
20629 | m.impl("pow_.Tensor" , TORCH_FN(functionalization::pow__Tensor)); |
20630 | m.impl("pow.Scalar_out" , TORCH_FN(functionalization::pow_out_Scalar_out)); |
20631 | m.impl("pow.Tensor_Scalar_out" , TORCH_FN(functionalization::pow_out_Tensor_Scalar_out)); |
20632 | m.impl("pow_.Scalar" , TORCH_FN(functionalization::pow__Scalar)); |
20633 | m.impl("float_power.Tensor_Tensor" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & exponent)>(at::native::float_power)); |
20634 | m.impl("float_power.Tensor_Tensor_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out)>(at::native::float_power_out)); |
20635 | m.impl("float_power_.Tensor" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Tensor & exponent)>(at::native::float_power_)); |
20636 | m.impl("float_power.Scalar" , static_cast<at::Tensor (*)(const at::Scalar & self, const at::Tensor & exponent)>(at::native::float_power)); |
20637 | m.impl("float_power.Scalar_out" , static_cast<at::Tensor & (*)(const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out)>(at::native::float_power_out)); |
20638 | m.impl("float_power.Tensor_Scalar" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Scalar & exponent)>(at::native::float_power)); |
20639 | m.impl("float_power.Tensor_Scalar_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out)>(at::native::float_power_out)); |
20640 | m.impl("float_power_.Scalar" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Scalar & exponent)>(at::native::float_power_)); |
20641 | m.impl("normal.out" , TORCH_FN(functionalization::normal_out_out)); |
20642 | m.impl("normal_" , TORCH_FN(functionalization::normal_)); |
20643 | m.impl("normal.Tensor_float_out" , TORCH_FN(functionalization::normal_out_Tensor_float_out)); |
20644 | m.impl("normal.float_Tensor_out" , TORCH_FN(functionalization::normal_out_float_Tensor_out)); |
20645 | m.impl("normal.Tensor_Tensor_out" , TORCH_FN(functionalization::normal_out_Tensor_Tensor_out)); |
20646 | m.impl("normal.float_float_out" , TORCH_FN(functionalization::normal_out_float_float_out)); |
20647 | m.impl("_amp_update_scale.out" , TORCH_FN(functionalization::_amp_update_scale_out_out)); |
20648 | m.impl("_amp_update_scale_" , TORCH_FN(functionalization::_amp_update_scale_)); |
20649 | m.impl("_foreach_atan.out" , TORCH_FN(functionalization::_foreach_atan_out_out)); |
20650 | m.impl("_foreach_atan_" , TORCH_FN(functionalization::_foreach_atan_)); |
20651 | m.impl("_foreach_erf.out" , TORCH_FN(functionalization::_foreach_erf_out_out)); |
20652 | m.impl("_foreach_erf_" , TORCH_FN(functionalization::_foreach_erf_)); |
20653 | m.impl("_foreach_erfc.out" , TORCH_FN(functionalization::_foreach_erfc_out_out)); |
20654 | m.impl("_foreach_erfc_" , TORCH_FN(functionalization::_foreach_erfc_)); |
20655 | m.impl("_foreach_log.out" , TORCH_FN(functionalization::_foreach_log_out_out)); |
20656 | m.impl("_foreach_log_" , TORCH_FN(functionalization::_foreach_log_)); |
20657 | m.impl("_foreach_sinh.out" , TORCH_FN(functionalization::_foreach_sinh_out_out)); |
20658 | m.impl("_foreach_sinh_" , TORCH_FN(functionalization::_foreach_sinh_)); |
20659 | m.impl("_foreach_lgamma.out" , TORCH_FN(functionalization::_foreach_lgamma_out_out)); |
20660 | m.impl("_foreach_lgamma_" , TORCH_FN(functionalization::_foreach_lgamma_)); |
20661 | m.impl("_foreach_lerp.List_out" , TORCH_FN(functionalization::_foreach_lerp_out_List_out)); |
20662 | m.impl("_foreach_lerp_.List" , TORCH_FN(functionalization::_foreach_lerp__List)); |
20663 | m.impl("_foreach_lerp.Scalar_out" , TORCH_FN(functionalization::_foreach_lerp_out_Scalar_out)); |
20664 | m.impl("_foreach_lerp_.Scalar" , TORCH_FN(functionalization::_foreach_lerp__Scalar)); |
20665 | m.impl("_convert_indices_from_coo_to_csr.out" , TORCH_FN(functionalization::_convert_indices_from_coo_to_csr_out_out)); |
20666 | m.impl("multi_margin_loss_backward.grad_input" , TORCH_FN(functionalization::multi_margin_loss_backward_out_grad_input)); |
20667 | m.impl("nll_loss_forward.output" , TORCH_FN(functionalization::nll_loss_forward_out_output)); |
20668 | m.impl("hardswish.out" , TORCH_FN(functionalization::hardswish_out_out)); |
20669 | m.impl("hardswish_" , TORCH_FN(functionalization::hardswish_)); |
20670 | m.impl("hardswish_backward.out" , TORCH_FN(functionalization::hardswish_backward_out_out)); |
20671 | m.impl("softshrink_backward.grad_input" , TORCH_FN(functionalization::softshrink_backward_out_grad_input)); |
20672 | m.impl("mkldnn_adaptive_avg_pool2d_backward.out" , TORCH_FN(functionalization::mkldnn_adaptive_avg_pool2d_backward_out_out)); |
20673 | m.impl("_adaptive_avg_pool2d_backward.out" , TORCH_FN(functionalization::_adaptive_avg_pool2d_backward_out_out)); |
20674 | m.impl("adaptive_avg_pool3d" , static_cast<at::Tensor (*)(const at::Tensor & self, c10::SymIntArrayRef output_size)>(at::native::adaptive_avg_pool3d_symint)); |
20675 | m.impl("adaptive_avg_pool3d.out" , TORCH_FN(functionalization::adaptive_avg_pool3d_out_out)); |
20676 | m.impl("adaptive_max_pool3d_backward.grad_input" , TORCH_FN(functionalization::adaptive_max_pool3d_backward_out_grad_input)); |
20677 | m.impl("avg_pool2d.out" , TORCH_FN(functionalization::avg_pool2d_out_out)); |
20678 | m.impl("avg_pool3d_backward.grad_input" , TORCH_FN(functionalization::avg_pool3d_backward_out_grad_input)); |
20679 | m.impl("fractional_max_pool2d.output" , TORCH_FN(functionalization::fractional_max_pool2d_out_output)); |
20680 | m.impl("reflection_pad3d_backward.grad_input" , TORCH_FN(functionalization::reflection_pad3d_backward_out_grad_input)); |
20681 | m.impl("replication_pad2d.out" , TORCH_FN(functionalization::replication_pad2d_out_out)); |
20682 | m.impl("_upsample_bilinear2d_aa_backward.grad_input" , TORCH_FN(functionalization::_upsample_bilinear2d_aa_backward_out_grad_input)); |
20683 | m.impl("_upsample_bicubic2d_aa.out" , TORCH_FN(functionalization::_upsample_bicubic2d_aa_out_out)); |
20684 | m.impl("upsample_trilinear3d.out" , TORCH_FN(functionalization::upsample_trilinear3d_out_out)); |
20685 | m.impl("_upsample_nearest_exact1d_backward.grad_input" , TORCH_FN(functionalization::_upsample_nearest_exact1d_backward_out_grad_input)); |
20686 | m.impl("upsample_nearest2d_backward.grad_input" , TORCH_FN(functionalization::upsample_nearest2d_backward_out_grad_input)); |
20687 | m.impl("tanh_backward.grad_input" , TORCH_FN(functionalization::tanh_backward_out_grad_input)); |
20688 | m.impl("_conv_depthwise2d.out" , TORCH_FN(functionalization::_conv_depthwise2d_out_out)); |
20689 | m.impl("col2im.out" , TORCH_FN(functionalization::col2im_out_out)); |
20690 | m.impl("column_stack" , static_cast<at::Tensor (*)(at::TensorList tensors)>(at::native::column_stack)); |
20691 | m.impl("column_stack.out" , static_cast<at::Tensor & (*)(at::TensorList tensors, at::Tensor & out)>(at::native::column_stack_out)); |
20692 | m.impl("im2col.out" , TORCH_FN(functionalization::im2col_out_out)); |
20693 | m.impl("isinf.out" , TORCH_FN(functionalization::isinf_out_out)); |
20694 | m.impl("isneginf.out" , TORCH_FN(functionalization::isneginf_out_out)); |
20695 | m.impl("special_expm1" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::special_expm1)); |
20696 | m.impl("special_expm1.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::Tensor & out)>(at::native::special_expm1_out)); |
20697 | m.impl("special_erf" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::special_erf)); |
20698 | m.impl("special_erf.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::Tensor & out)>(at::native::special_erf_out)); |
20699 | m.impl("special_logsumexp" , static_cast<at::Tensor (*)(const at::Tensor & self, at::IntArrayRef dim, bool keepdim)>(at::native::special_logsumexp)); |
20700 | m.impl("special_logsumexp.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out)>(at::native::special_logsumexp_out)); |
20701 | m.impl("special_log1p" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::special_log1p)); |
20702 | m.impl("special_log1p.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::Tensor & out)>(at::native::special_log1p_out)); |
20703 | m.impl("special_gammaincc" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other)>(at::native::special_gammaincc)); |
20704 | m.impl("special_gammaincc.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & other, at::Tensor & out)>(at::native::special_gammaincc_out)); |
20705 | m.impl("special_multigammaln" , static_cast<at::Tensor (*)(const at::Tensor & self, int64_t p)>(at::native::special_multigammaln)); |
20706 | m.impl("special_multigammaln.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, int64_t p, at::Tensor & out)>(at::native::special_multigammaln_out)); |
20707 | m.impl("fft_rfft2" , static_cast<at::Tensor (*)(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm)>(at::native::fft_rfft2)); |
20708 | m.impl("fft_rfft2.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out)>(at::native::fft_rfft2_out)); |
20709 | m.impl("fft_irfft2" , static_cast<at::Tensor (*)(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm)>(at::native::fft_irfft2)); |
20710 | m.impl("fft_irfft2.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out)>(at::native::fft_irfft2_out)); |
20711 | m.impl("fft_ihfft2" , static_cast<at::Tensor (*)(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm)>(at::native::fft_ihfft2)); |
20712 | m.impl("fft_ihfft2.out" , static_cast<const at::Tensor & (*)(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out)>(at::native::fft_ihfft2_out)); |
20713 | m.impl("fft_fftn" , static_cast<at::Tensor (*)(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm)>(at::native::fft_fftn)); |
20714 | m.impl("fft_fftn.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out)>(at::native::fft_fftn_out)); |
20715 | m.impl("fft_irfftn" , static_cast<at::Tensor (*)(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm)>(at::native::fft_irfftn)); |
20716 | m.impl("fft_irfftn.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out)>(at::native::fft_irfftn_out)); |
20717 | m.impl("linalg_lu_factor" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & A, bool pivot)>(at::native::linalg_lu_factor)); |
20718 | m.impl("linalg_lu_factor.out" , static_cast<::std::tuple<at::Tensor &,at::Tensor &> (*)(const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots)>(at::native::linalg_lu_factor_out)); |
20719 | m.impl("linalg_lu_factor_ex.out" , TORCH_FN(functionalization::linalg_lu_factor_ex_out_out)); |
20720 | m.impl("linalg_ldl_factor" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & self, bool hermitian)>(at::native::linalg_ldl_factor)); |
20721 | m.impl("linalg_ldl_factor.out" , static_cast<::std::tuple<at::Tensor &,at::Tensor &> (*)(const at::Tensor & self, bool hermitian, at::Tensor & LD, at::Tensor & pivots)>(at::native::linalg_ldl_factor_out)); |
20722 | m.impl("linalg_ldl_solve.out" , TORCH_FN(functionalization::linalg_ldl_solve_out_out)); |
20723 | m.impl("_linalg_slogdet.sign" , TORCH_FN(functionalization::_linalg_slogdet_out_sign)); |
20724 | m.impl("linalg_eigvals" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::linalg_eigvals)); |
20725 | m.impl("linalg_eigvals.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::Tensor & out)>(at::native::linalg_eigvals_out)); |
20726 | m.impl("linalg_eigh" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & self, c10::string_view UPLO)>(at::native::linalg_eigh)); |
20727 | m.impl("linalg_eigh.eigvals" , static_cast<::std::tuple<at::Tensor &,at::Tensor &> (*)(const at::Tensor & self, c10::string_view UPLO, at::Tensor & eigvals, at::Tensor & eigvecs)>(at::native::linalg_eigh_out)); |
20728 | m.impl("linalg_householder_product.out" , TORCH_FN(functionalization::linalg_householder_product_out_out)); |
20729 | m.impl("ger" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & vec2)>(at::native::ger)); |
20730 | m.impl("ger.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out)>(at::native::ger_out)); |
20731 | m.impl("linalg_norm" , static_cast<at::Tensor (*)(const at::Tensor & self, const c10::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype)>(at::native::linalg_norm)); |
20732 | m.impl("linalg_norm.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const c10::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out)>(at::native::linalg_norm_out)); |
20733 | m.impl("linalg_norm.ord_str" , static_cast<at::Tensor (*)(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype)>(at::native::linalg_norm)); |
20734 | m.impl("linalg_norm.ord_str_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out)>(at::native::linalg_norm_out)); |
20735 | m.impl("linalg_vector_norm.out" , TORCH_FN(functionalization::linalg_vector_norm_out_out)); |
20736 | m.impl("_linalg_solve_ex.result" , TORCH_FN(functionalization::_linalg_solve_ex_out_result)); |
20737 | m.impl("linalg_solve" , static_cast<at::Tensor (*)(const at::Tensor & A, const at::Tensor & B, bool left)>(at::native::linalg_solve)); |
20738 | m.impl("linalg_solve.out" , static_cast<at::Tensor & (*)(const at::Tensor & A, const at::Tensor & B, bool left, at::Tensor & out)>(at::native::linalg_solve_out)); |
20739 | m.impl("linalg_multi_dot" , static_cast<at::Tensor (*)(at::TensorList tensors)>(at::native::linalg_multi_dot)); |
20740 | m.impl("linalg_multi_dot.out" , static_cast<at::Tensor & (*)(at::TensorList tensors, at::Tensor & out)>(at::native::linalg_multi_dot_out)); |
20741 | m.impl("_test_optional_filled_intlist.out" , TORCH_FN(functionalization::_test_optional_filled_intlist_out_out)); |
20742 | m.impl("_test_autograd_multiple_dispatch.fullcoverage_out" , TORCH_FN(functionalization::_test_autograd_multiple_dispatch_out_fullcoverage_out)); |
20743 | m.impl("_test_autograd_multiple_dispatch_view_copy.out" , TORCH_FN(functionalization::_test_autograd_multiple_dispatch_view_copy_out_out)); |
20744 | m.impl("segment_reduce.out" , TORCH_FN(functionalization::segment_reduce_out_out)); |
20745 | m.impl("_nested_tensor_from_tensor_list.out" , TORCH_FN(functionalization::_nested_tensor_from_tensor_list_out_out)); |
20746 | m.impl("diagonal_copy.out" , TORCH_FN(functionalization::diagonal_copy_out_out)); |
20747 | m.impl("detach_copy.out" , TORCH_FN(functionalization::detach_copy_out_out)); |
20748 | m.impl("slice_copy.Tensor_out" , TORCH_FN(functionalization::slice_copy_out_Tensor_out)); |
20749 | m.impl("transpose_copy.int_out" , TORCH_FN(functionalization::transpose_copy_out_int_out)); |
20750 | m.impl("indices_copy.out" , TORCH_FN(functionalization::indices_copy_out_out)); |
20751 | m.impl("row_indices_copy.out" , TORCH_FN(functionalization::row_indices_copy_out_out)); |
20752 | m.impl("_triton_multi_head_attention.out" , TORCH_FN(functionalization::_triton_multi_head_attention_out_out)); |
20753 | m.impl("special_bessel_j1.out" , TORCH_FN(functionalization::special_bessel_j1_out_out)); |
20754 | m.impl("special_bessel_y1.out" , TORCH_FN(functionalization::special_bessel_y1_out_out)); |
20755 | m.impl("special_legendre_polynomial_p.out" , TORCH_FN(functionalization::special_legendre_polynomial_p_out_out)); |
20756 | m.impl("special_legendre_polynomial_p.x_scalar" , static_cast<at::Tensor (*)(const at::Scalar & x, const at::Tensor & n)>(at::native::special_legendre_polynomial_p)); |
20757 | m.impl("special_legendre_polynomial_p.x_scalar_out" , static_cast<at::Tensor & (*)(const at::Scalar & x, const at::Tensor & n, at::Tensor & out)>(at::native::special_legendre_polynomial_p_out)); |
20758 | m.impl("special_legendre_polynomial_p.n_scalar" , static_cast<at::Tensor (*)(const at::Tensor & x, const at::Scalar & n)>(at::native::special_legendre_polynomial_p)); |
20759 | m.impl("special_legendre_polynomial_p.n_scalar_out" , TORCH_FN(functionalization::special_legendre_polynomial_p_out_n_scalar_out)); |
20760 | m.impl("special_modified_bessel_i0.out" , TORCH_FN(functionalization::special_modified_bessel_i0_out_out)); |
20761 | m.impl("special_shifted_chebyshev_polynomial_t.out" , TORCH_FN(functionalization::special_shifted_chebyshev_polynomial_t_out_out)); |
20762 | m.impl("special_shifted_chebyshev_polynomial_t.x_scalar" , static_cast<at::Tensor (*)(const at::Scalar & x, const at::Tensor & n)>(at::native::special_shifted_chebyshev_polynomial_t)); |
20763 | m.impl("special_shifted_chebyshev_polynomial_t.x_scalar_out" , static_cast<at::Tensor & (*)(const at::Scalar & x, const at::Tensor & n, at::Tensor & out)>(at::native::special_shifted_chebyshev_polynomial_t_out)); |
20764 | m.impl("special_shifted_chebyshev_polynomial_t.n_scalar" , static_cast<at::Tensor (*)(const at::Tensor & x, const at::Scalar & n)>(at::native::special_shifted_chebyshev_polynomial_t)); |
20765 | m.impl("special_shifted_chebyshev_polynomial_t.n_scalar_out" , TORCH_FN(functionalization::special_shifted_chebyshev_polynomial_t_out_n_scalar_out)); |
20766 | m.impl("special_shifted_chebyshev_polynomial_u.out" , TORCH_FN(functionalization::special_shifted_chebyshev_polynomial_u_out_out)); |
20767 | m.impl("special_shifted_chebyshev_polynomial_u.x_scalar" , static_cast<at::Tensor (*)(const at::Scalar & x, const at::Tensor & n)>(at::native::special_shifted_chebyshev_polynomial_u)); |
20768 | m.impl("special_shifted_chebyshev_polynomial_u.x_scalar_out" , static_cast<at::Tensor & (*)(const at::Scalar & x, const at::Tensor & n, at::Tensor & out)>(at::native::special_shifted_chebyshev_polynomial_u_out)); |
20769 | m.impl("special_shifted_chebyshev_polynomial_u.n_scalar" , static_cast<at::Tensor (*)(const at::Tensor & x, const at::Scalar & n)>(at::native::special_shifted_chebyshev_polynomial_u)); |
20770 | m.impl("special_shifted_chebyshev_polynomial_u.n_scalar_out" , TORCH_FN(functionalization::special_shifted_chebyshev_polynomial_u_out_n_scalar_out)); |
20771 | m.impl("special_shifted_chebyshev_polynomial_w.out" , TORCH_FN(functionalization::special_shifted_chebyshev_polynomial_w_out_out)); |
20772 | m.impl("special_shifted_chebyshev_polynomial_w.x_scalar" , static_cast<at::Tensor (*)(const at::Scalar & x, const at::Tensor & n)>(at::native::special_shifted_chebyshev_polynomial_w)); |
20773 | m.impl("special_shifted_chebyshev_polynomial_w.x_scalar_out" , static_cast<at::Tensor & (*)(const at::Scalar & x, const at::Tensor & n, at::Tensor & out)>(at::native::special_shifted_chebyshev_polynomial_w_out)); |
20774 | m.impl("special_shifted_chebyshev_polynomial_w.n_scalar" , static_cast<at::Tensor (*)(const at::Tensor & x, const at::Scalar & n)>(at::native::special_shifted_chebyshev_polynomial_w)); |
20775 | m.impl("special_shifted_chebyshev_polynomial_w.n_scalar_out" , TORCH_FN(functionalization::special_shifted_chebyshev_polynomial_w_out_n_scalar_out)); |
20776 | m.impl("_fused_adam.out" , TORCH_FN(functionalization::_fused_adam_out_out)); |
20777 | m.impl("_fused_adam_" , TORCH_FN(functionalization::_fused_adam_)); |
20778 | m.impl("rename" , static_cast<at::Tensor (*)(const at::Tensor & self, c10::optional<at::DimnameList> names)>(at::native::rename)); |
20779 | m.impl("rename_" , static_cast<at::Tensor & (*)(at::Tensor & self, c10::optional<at::DimnameList> names)>(at::native::rename_)); |
20780 | m.impl("imag" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::imag)); |
20781 | m.impl("_conj" , TORCH_FN(functionalization::_conj)); |
20782 | m.impl("conj" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::conj)); |
20783 | m.impl("resolve_conj" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::resolve_conj)); |
20784 | m.impl("as_strided" , TORCH_FN(functionalization::as_strided)); |
20785 | m.impl("as_strided_" , TORCH_FN(functionalization::as_strided_)); |
20786 | m.impl("_sparse_broadcast_to" , TORCH_FN(functionalization::_sparse_broadcast_to)); |
20787 | m.impl("chunk" , static_cast<::std::vector<at::Tensor> (*)(const at::Tensor & self, int64_t chunks, int64_t dim)>(at::native::chunk)); |
20788 | m.impl("tensor_split.sections" , static_cast<::std::vector<at::Tensor> (*)(const at::Tensor & self, c10::SymInt sections, int64_t dim)>(at::native::tensor_split_sections_symint)); |
20789 | m.impl("tensor_split.indices" , static_cast<::std::vector<at::Tensor> (*)(const at::Tensor & self, c10::SymIntArrayRef indices, int64_t dim)>(at::native::tensor_split_indices_symint)); |
20790 | m.impl("tensor_split.tensor_indices_or_sections" , static_cast<::std::vector<at::Tensor> (*)(const at::Tensor & self, const at::Tensor & tensor_indices_or_sections, int64_t dim)>(at::native::tensor_split)); |
20791 | m.impl("expand_as" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other)>(at::native::expand_as)); |
20792 | m.impl("unflatten.int" , static_cast<at::Tensor (*)(const at::Tensor & self, int64_t dim, at::IntArrayRef sizes)>(at::native::unflatten)); |
20793 | m.impl("unflatten.Dimname" , static_cast<at::Tensor (*)(const at::Tensor & self, at::Dimname dim, at::IntArrayRef sizes, at::DimnameList names)>(at::native::unflatten)); |
20794 | m.impl("permute" , TORCH_FN(functionalization::permute)); |
20795 | m.impl("movedim.intlist" , static_cast<at::Tensor (*)(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination)>(at::native::movedim)); |
20796 | m.impl("movedim.int" , static_cast<at::Tensor (*)(const at::Tensor & self, int64_t source, int64_t destination)>(at::native::movedim)); |
20797 | m.impl("mH" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::mH)); |
20798 | m.impl("pin_memory" , static_cast<at::Tensor (*)(const at::Tensor & self, c10::optional<at::Device> device)>(at::native::pin_memory)); |
20799 | m.impl("_reshape_alias" , TORCH_FN(functionalization::_reshape_alias)); |
20800 | m.impl("detach" , TORCH_FN(functionalization::detach)); |
20801 | m.impl("detach_" , TORCH_FN(functionalization::detach_)); |
20802 | m.impl("split.Tensor" , TORCH_FN(functionalization::split_Tensor)); |
20803 | m.impl("split.sizes" , static_cast<::std::vector<at::Tensor> (*)(const at::Tensor & self, c10::SymIntArrayRef split_size, int64_t dim)>(at::native::split_symint)); |
20804 | m.impl("dsplit.int" , static_cast<::std::vector<at::Tensor> (*)(const at::Tensor & self, int64_t sections)>(at::native::dsplit)); |
20805 | m.impl("dsplit.array" , static_cast<::std::vector<at::Tensor> (*)(const at::Tensor & self, at::IntArrayRef indices)>(at::native::dsplit)); |
20806 | m.impl("positive" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::positive)); |
20807 | m.impl("values" , TORCH_FN(functionalization::values)); |
20808 | m.impl("row_indices" , TORCH_FN(functionalization::row_indices)); |
20809 | m.impl("swapdims" , static_cast<at::Tensor (*)(const at::Tensor & self, int64_t dim0, int64_t dim1)>(at::native::swapdims)); |
20810 | m.impl("swapdims_" , static_cast<at::Tensor & (*)(at::Tensor & self, int64_t dim0, int64_t dim1)>(at::native::swapdims_)); |
20811 | m.impl("_test_autograd_multiple_dispatch_view" , TORCH_FN(functionalization::_test_autograd_multiple_dispatch_view)); |
20812 | m.impl("_cast_Double" , static_cast<at::Tensor (*)(const at::Tensor & self, bool non_blocking)>(at::native::_cast_Double)); |
20813 | m.impl("_cast_Int" , static_cast<at::Tensor (*)(const at::Tensor & self, bool non_blocking)>(at::native::_cast_Int)); |
20814 | m.impl("_cast_Short" , static_cast<at::Tensor (*)(const at::Tensor & self, bool non_blocking)>(at::native::_cast_Short)); |
20815 | m.impl("output_nr" , static_cast<int64_t (*)(const at::Tensor & self)>(at::native::output_nr)); |
20816 | m.impl("_use_cudnn_rnn_flatten_weight" , static_cast<bool (*)()>(at::native::_use_cudnn_rnn_flatten_weight)); |
20817 | m.impl("_sobol_engine_initialize_state_" , static_cast<at::Tensor & (*)(at::Tensor & self, int64_t dimension)>(at::native::_sobol_engine_initialize_state_)); |
20818 | m.impl("_shape_as_tensor" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::_shape_as_tensor)); |
20819 | m.impl("_dim_arange" , static_cast<at::Tensor (*)(const at::Tensor & like, int64_t dim)>(at::native::_dim_arange)); |
20820 | m.impl("cudnn_is_acceptable" , static_cast<bool (*)(const at::Tensor & self)>(at::native::cudnn_is_acceptable)); |
20821 | m.impl("_convolution.deprecated" , static_cast<at::Tensor (*)(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled)>(at::native::_convolution)); |
20822 | m.impl("conv2d" , static_cast<at::Tensor (*)(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups)>(at::native::conv2d)); |
20823 | m.impl("conv2d.padding" , static_cast<at::Tensor (*)(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups)>(at::native::conv2d)); |
20824 | m.impl("conv_tbc_backward" , static_cast<::std::tuple<at::Tensor,at::Tensor,at::Tensor> (*)(const at::Tensor & self, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, int64_t pad)>(at::native::conv_tbc_backward)); |
20825 | m.impl("cov" , static_cast<at::Tensor (*)(const at::Tensor & self, int64_t correction, const c10::optional<at::Tensor> & fweights, const c10::optional<at::Tensor> & aweights)>(at::native::cov)); |
20826 | m.impl("divide.Scalar" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Scalar & other)>(at::native::divide)); |
20827 | m.impl("divide_.Scalar" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Scalar & other)>(at::native::divide_)); |
20828 | m.impl("divide.Scalar_mode" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode)>(at::native::divide)); |
20829 | m.impl("divide_.Scalar_mode" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode)>(at::native::divide_)); |
20830 | m.impl("true_divide.Scalar" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Scalar & other)>(at::native::true_divide)); |
20831 | m.impl("true_divide_.Scalar" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Scalar & other)>(at::native::true_divide_)); |
20832 | m.impl("embedding_backward" , static_cast<at::Tensor (*)(const at::Tensor & grad, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse)>(at::native::embedding_backward_symint)); |
20833 | m.impl("embedding_bag" , static_cast<::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> (*)(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset)>(at::native::embedding_bag)); |
20834 | m.impl("embedding_bag.padding_idx" , static_cast<::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> (*)(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, c10::optional<int64_t> padding_idx)>(at::native::embedding_bag)); |
20835 | m.impl("_embedding_bag_backward" , static_cast<at::Tensor (*)(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx)>(at::native::_embedding_bag_backward_symint)); |
20836 | m.impl("_cufft_get_plan_cache_size" , static_cast<int64_t (*)(int64_t device_index)>(at::native::_cufft_get_plan_cache_size)); |
20837 | m.impl("_cufft_get_plan_cache_max_size" , static_cast<int64_t (*)(int64_t device_index)>(at::native::_cufft_get_plan_cache_max_size)); |
20838 | m.impl("_is_zerotensor" , static_cast<bool (*)(const at::Tensor & self)>(at::native::_is_zerotensor)); |
20839 | m.impl("is_inference" , static_cast<bool (*)(const at::Tensor & self)>(at::native::is_inference)); |
20840 | m.impl("kl_div" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & target, int64_t reduction, bool log_target)>(at::native::kl_div)); |
20841 | m.impl("margin_ranking_loss" , static_cast<at::Tensor (*)(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction)>(at::native::margin_ranking_loss)); |
20842 | m.impl("matrix_exp" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::matrix_exp)); |
20843 | m.impl("_sparse_mm" , static_cast<at::Tensor (*)(const at::Tensor & sparse, const at::Tensor & dense)>(at::native::_sparse_mm)); |
20844 | m.impl("_sparse_mm.reduce" , static_cast<at::Tensor (*)(const at::Tensor & sparse, const at::Tensor & dense, c10::string_view reduce)>(at::native::_sparse_mm)); |
20845 | m.impl("_nnpack_available" , static_cast<bool (*)()>(at::native::_nnpack_available)); |
20846 | m.impl("relu6" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::relu6)); |
20847 | m.impl("relu6_" , static_cast<at::Tensor & (*)(at::Tensor & self)>(at::native::relu6_)); |
20848 | m.impl("selu" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::selu)); |
20849 | m.impl("selu_" , static_cast<at::Tensor & (*)(at::Tensor & self)>(at::native::selu_)); |
20850 | m.impl("istft" , static_cast<at::Tensor (*)(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool center, bool normalized, c10::optional<bool> onesided, c10::optional<int64_t> length, bool return_complex)>(at::native::istft)); |
20851 | m.impl("std_mean" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & self, bool unbiased)>(at::native::std_mean)); |
20852 | m.impl("std_mean.dim" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim)>(at::native::std_mean)); |
20853 | m.impl("std_mean.names_dim" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim)>(at::native::std_mean)); |
20854 | m.impl("std_mean.correction_names" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim)>(at::native::std_mean)); |
20855 | m.impl("flipud" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::flipud)); |
20856 | m.impl("var_mean" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & self, bool unbiased)>(at::native::var_mean)); |
20857 | m.impl("var_mean.dim" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim)>(at::native::var_mean)); |
20858 | m.impl("var_mean.names_dim" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim)>(at::native::var_mean)); |
20859 | m.impl("var_mean.correction_names" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim)>(at::native::var_mean)); |
20860 | m.impl("norm_except_dim" , static_cast<at::Tensor (*)(const at::Tensor & v, int64_t pow, int64_t dim)>(at::native::norm_except_dim)); |
20861 | m.impl("_sparse_compressed_tensor_unsafe" , static_cast<at::Tensor (*)(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory)>(at::native::_sparse_compressed_tensor_unsafe)); |
20862 | m.impl("sparse_coo_tensor.indices" , static_cast<at::Tensor (*)(const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory)>(at::native::sparse_coo_tensor)); |
20863 | m.impl("sparse_coo_tensor.indices_size" , static_cast<at::Tensor (*)(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory)>(at::native::sparse_coo_tensor)); |
20864 | m.impl("_validate_sparse_bsc_tensor_args" , static_cast<void (*)(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size)>(at::native::_validate_sparse_bsc_tensor_args)); |
20865 | m.impl("to_mkldnn_backward" , static_cast<at::Tensor (*)(const at::Tensor & grad, const at::Tensor & input)>(at::native::to_mkldnn_backward)); |
20866 | m.impl("fake_quantize_per_tensor_affine" , static_cast<at::Tensor (*)(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max)>(at::native::fake_quantize_per_tensor_affine)); |
20867 | m.impl("fake_quantize_per_tensor_affine.tensor_qparams" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max)>(at::native::fake_quantize_per_tensor_affine)); |
20868 | m.impl("fused_moving_avg_obs_fake_quant" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant)>(at::native::fused_moving_avg_obs_fake_quant)); |
20869 | m.impl("_choose_qparams_per_tensor" , static_cast<::std::tuple<double,int64_t> (*)(const at::Tensor & self, bool reduce_range)>(at::native::_choose_qparams_per_tensor)); |
20870 | m.impl("meshgrid" , static_cast<::std::vector<at::Tensor> (*)(at::TensorList tensors)>(at::native::meshgrid)); |
20871 | m.impl("meshgrid.indexing" , static_cast<::std::vector<at::Tensor> (*)(at::TensorList tensors, c10::string_view indexing)>(at::native::meshgrid)); |
20872 | m.impl("cartesian_prod" , static_cast<at::Tensor (*)(at::TensorList tensors)>(at::native::cartesian_prod)); |
20873 | m.impl("can_cast" , static_cast<bool (*)(at::ScalarType from, at::ScalarType to)>(at::native::can_cast)); |
20874 | m.impl("promote_types" , static_cast<at::ScalarType (*)(at::ScalarType type1, at::ScalarType type2)>(at::native::promote_types)); |
20875 | m.impl("rnn_tanh.input" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first)>(at::native::rnn_tanh)); |
20876 | m.impl("rnn_tanh.data" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional)>(at::native::rnn_tanh)); |
20877 | m.impl("gru_cell" , static_cast<at::Tensor (*)(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh)>(at::native::gru_cell)); |
20878 | m.impl("rnn_relu_cell" , static_cast<at::Tensor (*)(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh)>(at::native::rnn_relu_cell)); |
20879 | m.impl("_pad_packed_sequence" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & data, const at::Tensor & batch_sizes, bool batch_first, const at::Scalar & padding_value, int64_t total_length)>(at::native::_pad_packed_sequence)); |
20880 | m.impl("gather_backward" , static_cast<at::Tensor (*)(const at::Tensor & grad, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad)>(at::native::gather_backward)); |
20881 | m.impl("_gather_sparse_backward" , static_cast<at::Tensor (*)(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & grad)>(at::native::_gather_sparse_backward)); |
20882 | m.impl("linalg_vander" , static_cast<at::Tensor (*)(const at::Tensor & x, c10::optional<int64_t> N)>(at::native::linalg_vander)); |
20883 | m.impl("argsort" , static_cast<at::Tensor (*)(const at::Tensor & self, int64_t dim, bool descending)>(at::native::argsort)); |
20884 | m.impl("argsort.dimname" , static_cast<at::Tensor (*)(const at::Tensor & self, at::Dimname dim, bool descending)>(at::native::argsort)); |
20885 | m.impl("_pad_circular" , static_cast<at::Tensor (*)(const at::Tensor & self, c10::SymIntArrayRef pad)>(at::native::_pad_circular_symint)); |
20886 | m.impl("upsample_trilinear3d.vec" , static_cast<at::Tensor (*)(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors)>(at::native::upsample_trilinear3d)); |
20887 | m.impl("_upsample_bicubic2d_aa.vec" , static_cast<at::Tensor (*)(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors)>(at::native::_upsample_bicubic2d_aa)); |
20888 | m.impl("special_softmax" , static_cast<at::Tensor (*)(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype)>(at::native::special_softmax)); |
20889 | m.impl("fft_fftshift" , static_cast<at::Tensor (*)(const at::Tensor & self, at::OptionalIntArrayRef dim)>(at::native::fft_fftshift)); |
20890 | m.impl("nested_to_padded_tensor" , static_cast<at::Tensor (*)(const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size)>(at::native::nested_to_padded_tensor)); |
20891 | m.impl("_test_ambiguous_defaults.a" , static_cast<at::Tensor (*)(const at::Tensor & dummy, int64_t a, int64_t b)>(at::native::_test_ambiguous_defaults)); |
20892 | m.impl("_test_ambiguous_defaults.b" , static_cast<at::Tensor (*)(const at::Tensor & dummy, int64_t a, c10::string_view b)>(at::native::_test_ambiguous_defaults)); |
20893 | m.impl("_test_autograd_multiple_dispatch.ntonly" , static_cast<at::Tensor (*)(const at::Tensor & self, bool b)>(at::native::_test_autograd_multiple_dispatch_ntonly)); |
20894 | m.impl("pad_sequence" , static_cast<at::Tensor (*)(at::TensorList sequences, bool batch_first, double padding_value)>(at::native::pad_sequence)); |
20895 | m.impl("flatten_dense_tensors" , static_cast<at::Tensor (*)(at::TensorList tensors)>(at::native::flatten_dense_tensors)); |
20896 | m.impl("_scaled_dot_product_attention" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool need_attn_weights, bool is_causal)>(at::native::_scaled_dot_product_attention));; |
20897 | } |
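20898 |  |
20899 | // Informal note (not emitted by torchgen): the block above registers two kinds of |
20900 | // kernels under the Functionalize dispatch key. Entries wrapped in |
20901 | // TORCH_FN(functionalization::...) are the codegen'd wrappers for mutating and |
20902 | // out= operators, while the static_cast<...>(at::native::...) entries re-register |
20903 | // composite kernels so they simply decompose beneath this key. A functionalization |
20904 | // out= wrapper roughly follows this shape (illustrative sketch only; `foo_out_out` |
20905 | // is a hypothetical name, not an operator in this file): |
20906 | // |
20907 | //   at::Tensor & foo_out_out(c10::DispatchKeySet ks, const at::Tensor & self, at::Tensor & out) { |
20908 | //     // 1. sync and unwrap any FunctionalTensorWrapper inputs; |
20909 | //     // 2. redispatch to the functional variant below the Functionalize key; |
20910 | //     // 3. write the result back into `out` (replace_/commit_update) so callers |
20911 | //     //    observe the mutation without the kernel ever mutating real storage. |
20912 | //   } |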
20898 | |
20899 | } // namespace |
20900 | |
20901 | } // namespace at |
20902 | |