#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
// @generated by torchgen/gen.py from RegisterFunctionalization.cpp

#include <ATen/core/LegacyTypeDispatch.h>
#include <ATen/EmptyTensor.h>
#include <ATen/FunctionalTensorWrapper.h>
#include <ATen/FunctionalInverses.h>
#include <torch/library.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Operators.h>
#include <ATen/NativeFunctions.h>
#else
// needed for the meta tensor calls to get stride info in functionalization
#include <ATen/ops/empty_strided_native.h>
// needed for special handling of copy_().
// See Note [functionalizating copy_() and not preserving strides]
#include <ATen/ops/to_ops.h>
#include <ATen/ops/expand_copy_ops.h>

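// The per-operator headers below come in <op>_native.h / <op>_ops.h pairs.
// Roughly speaking (this description is an editorial assumption, not part of
// the generated output): <ATen/ops/<op>_ops.h> declares the at::_ops::<op>
// entry points that the functionalization kernels in this file call or
// redispatch through, while <ATen/ops/<op>_native.h> declares the matching
// at::native kernel signatures for that operator.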
#include <ATen/ops/_cudnn_rnn_flatten_weight_native.h>
#include <ATen/ops/_cudnn_rnn_flatten_weight_ops.h>
#include <ATen/ops/abs_native.h>
#include <ATen/ops/abs_ops.h>
#include <ATen/ops/absolute_native.h>
#include <ATen/ops/absolute_ops.h>
#include <ATen/ops/_conj_physical_native.h>
#include <ATen/ops/_conj_physical_ops.h>
#include <ATen/ops/acos_native.h>
#include <ATen/ops/acos_ops.h>
#include <ATen/ops/arccos_native.h>
#include <ATen/ops/arccos_ops.h>
#include <ATen/ops/affine_grid_generator_native.h>
#include <ATen/ops/affine_grid_generator_ops.h>
#include <ATen/ops/arange_native.h>
#include <ATen/ops/arange_ops.h>
#include <ATen/ops/arccosh_native.h>
#include <ATen/ops/arccosh_ops.h>
#include <ATen/ops/arcsinh_native.h>
#include <ATen/ops/arcsinh_ops.h>
#include <ATen/ops/atanh_native.h>
#include <ATen/ops/atanh_ops.h>
#include <ATen/ops/asin_native.h>
#include <ATen/ops/asin_ops.h>
#include <ATen/ops/binary_cross_entropy_backward_native.h>
#include <ATen/ops/binary_cross_entropy_backward_ops.h>
#include <ATen/ops/binary_cross_entropy_with_logits_native.h>
#include <ATen/ops/binary_cross_entropy_with_logits_ops.h>
#include <ATen/ops/logical_not_native.h>
#include <ATen/ops/logical_not_ops.h>
#include <ATen/ops/logical_and_native.h>
#include <ATen/ops/logical_and_ops.h>
#include <ATen/ops/concatenate_native.h>
#include <ATen/ops/concatenate_ops.h>
#include <ATen/ops/block_diag_native.h>
#include <ATen/ops/block_diag_ops.h>
#include <ATen/ops/chain_matmul_native.h>
#include <ATen/ops/chain_matmul_ops.h>
#include <ATen/ops/convolution_backward_native.h>
#include <ATen/ops/convolution_backward_ops.h>
#include <ATen/ops/_copy_from_native.h>
#include <ATen/ops/_copy_from_ops.h>
#include <ATen/ops/cosh_native.h>
#include <ATen/ops/cosh_ops.h>
#include <ATen/ops/cudnn_convolution_transpose_native.h>
#include <ATen/ops/cudnn_convolution_transpose_ops.h>
#include <ATen/ops/_mps_convolution_transpose_native.h>
#include <ATen/ops/_mps_convolution_transpose_ops.h>
#include <ATen/ops/cudnn_grid_sampler_native.h>
#include <ATen/ops/cudnn_grid_sampler_ops.h>
#include <ATen/ops/cumprod_native.h>
#include <ATen/ops/cumprod_ops.h>
#include <ATen/ops/diag_embed_native.h>
#include <ATen/ops/diag_embed_ops.h>
#include <ATen/ops/diagonal_backward_native.h>
#include <ATen/ops/diagonal_backward_ops.h>
#include <ATen/ops/div_native.h>
#include <ATen/ops/div_ops.h>
#include <ATen/ops/_embedding_bag_native.h>
#include <ATen/ops/_embedding_bag_ops.h>
#include <ATen/ops/_embedding_bag_per_sample_weights_backward_native.h>
#include <ATen/ops/_embedding_bag_per_sample_weights_backward_ops.h>
#include <ATen/ops/new_full_native.h>
#include <ATen/ops/new_full_ops.h>
#include <ATen/ops/empty_quantized_native.h>
#include <ATen/ops/empty_quantized_ops.h>
#include <ATen/ops/empty_strided_native.h>
#include <ATen/ops/empty_strided_ops.h>
#include <ATen/ops/exp_native.h>
#include <ATen/ops/exp_ops.h>
#include <ATen/ops/expm1_native.h>
#include <ATen/ops/expm1_ops.h>
#include <ATen/ops/fill_native.h>
#include <ATen/ops/fill_ops.h>
#include <ATen/ops/floor_native.h>
#include <ATen/ops/floor_ops.h>
#include <ATen/ops/floor_divide_native.h>
#include <ATen/ops/floor_divide_ops.h>
#include <ATen/ops/full_native.h>
#include <ATen/ops/full_ops.h>
#include <ATen/ops/from_file_native.h>
#include <ATen/ops/from_file_ops.h>
#include <ATen/ops/grid_sampler_2d_native.h>
#include <ATen/ops/grid_sampler_2d_ops.h>
#include <ATen/ops/grid_sampler_3d_native.h>
#include <ATen/ops/grid_sampler_3d_ops.h>
#include <ATen/ops/hamming_window_native.h>
#include <ATen/ops/hamming_window_ops.h>
#include <ATen/ops/native_group_norm_native.h>
#include <ATen/ops/native_group_norm_ops.h>
#include <ATen/ops/_fft_c2r_native.h>
#include <ATen/ops/_fft_c2r_ops.h>
#include <ATen/ops/isnan_native.h>
#include <ATen/ops/isnan_ops.h>
#include <ATen/ops/ldexp_native.h>
#include <ATen/ops/ldexp_ops.h>
#include <ATen/ops/log2_native.h>
#include <ATen/ops/log2_ops.h>
#include <ATen/ops/logaddexp2_native.h>
#include <ATen/ops/logaddexp2_ops.h>
#include <ATen/ops/xlogy_native.h>
#include <ATen/ops/xlogy_ops.h>
#include <ATen/ops/logspace_native.h>
#include <ATen/ops/logspace_ops.h>
#include <ATen/ops/matrix_power_native.h>
#include <ATen/ops/matrix_power_ops.h>
#include <ATen/ops/_aminmax_native.h>
#include <ATen/ops/_aminmax_ops.h>
#include <ATen/ops/aminmax_native.h>
#include <ATen/ops/aminmax_ops.h>
#include <ATen/ops/_compute_linear_combination_native.h>
#include <ATen/ops/_compute_linear_combination_ops.h>
#include <ATen/ops/_mps_max_pool2d_native.h>
#include <ATen/ops/_mps_max_pool2d_ops.h>
#include <ATen/ops/mkldnn_max_pool3d_backward_native.h>
#include <ATen/ops/mkldnn_max_pool3d_backward_ops.h>
#include <ATen/ops/min_native.h>
#include <ATen/ops/min_ops.h>
#include <ATen/ops/mps_convolution_backward_native.h>
#include <ATen/ops/mps_convolution_backward_ops.h>
#include <ATen/ops/miopen_rnn_native.h>
#include <ATen/ops/miopen_rnn_ops.h>
#include <ATen/ops/mv_native.h>
#include <ATen/ops/mv_ops.h>
#include <ATen/ops/_native_batch_norm_legit_native.h>
#include <ATen/ops/_native_batch_norm_legit_ops.h>
#include <ATen/ops/batch_norm_stats_native.h>
#include <ATen/ops/batch_norm_stats_ops.h>
#include <ATen/ops/batch_norm_backward_elemt_native.h>
#include <ATen/ops/batch_norm_backward_elemt_ops.h>
#include <ATen/ops/_euclidean_dist_native.h>
#include <ATen/ops/_euclidean_dist_ops.h>
#include <ATen/ops/_cdist_forward_native.h>
#include <ATen/ops/_cdist_forward_ops.h>
#include <ATen/ops/_cdist_backward_native.h>
#include <ATen/ops/_cdist_backward_ops.h>
#include <ATen/ops/pixel_unshuffle_native.h>
#include <ATen/ops/pixel_unshuffle_ops.h>
#include <ATen/ops/rad2deg_native.h>
#include <ATen/ops/rad2deg_ops.h>
#include <ATen/ops/scalar_tensor_native.h>
#include <ATen/ops/scalar_tensor_ops.h>
#include <ATen/ops/rand_native.h>
#include <ATen/ops/rand_ops.h>
#include <ATen/ops/rand_like_native.h>
#include <ATen/ops/rand_like_ops.h>
#include <ATen/ops/relu_native.h>
#include <ATen/ops/relu_ops.h>
#include <ATen/ops/logit_native.h>
#include <ATen/ops/logit_ops.h>
#include <ATen/ops/select_scatter_native.h>
#include <ATen/ops/select_scatter_ops.h>
#include <ATen/ops/softmax_native.h>
#include <ATen/ops/softmax_ops.h>
#include <ATen/ops/stack_native.h>
#include <ATen/ops/stack_ops.h>
#include <ATen/ops/vstack_native.h>
#include <ATen/ops/vstack_ops.h>
#include <ATen/ops/nansum_native.h>
#include <ATen/ops/nansum_ops.h>
#include <ATen/ops/sqrt_native.h>
#include <ATen/ops/sqrt_ops.h>
#include <ATen/ops/prod_native.h>
#include <ATen/ops/prod_ops.h>
#include <ATen/ops/threshold_backward_native.h>
#include <ATen/ops/threshold_backward_ops.h>
#include <ATen/ops/_transform_bias_rescale_qkv_native.h>
#include <ATen/ops/_transform_bias_rescale_qkv_ops.h>
#include <ATen/ops/_nested_from_padded_native.h>
#include <ATen/ops/_nested_from_padded_ops.h>
#include <ATen/ops/_nested_tensor_size_native.h>
#include <ATen/ops/_nested_tensor_size_ops.h>
#include <ATen/ops/_nested_view_from_buffer_copy_native.h>
#include <ATen/ops/_nested_view_from_buffer_copy_ops.h>
#include <ATen/ops/trunc_native.h>
#include <ATen/ops/trunc_ops.h>
#include <ATen/ops/unique_dim_consecutive_native.h>
#include <ATen/ops/unique_dim_consecutive_ops.h>
#include <ATen/ops/where_native.h>
#include <ATen/ops/where_ops.h>
#include <ATen/ops/_weight_norm_interface_backward_native.h>
#include <ATen/ops/_weight_norm_interface_backward_ops.h>
#include <ATen/ops/_sample_dirichlet_native.h>
#include <ATen/ops/_sample_dirichlet_ops.h>
#include <ATen/ops/binomial_native.h>
#include <ATen/ops/binomial_ops.h>
#include <ATen/ops/native_norm_native.h>
#include <ATen/ops/native_norm_ops.h>
#include <ATen/ops/_sparse_sum_native.h>
#include <ATen/ops/_sparse_sum_ops.h>
#include <ATen/ops/_sparse_sum_backward_native.h>
#include <ATen/ops/_sparse_sum_backward_ops.h>
#include <ATen/ops/_sparse_softmax_native.h>
#include <ATen/ops/_sparse_softmax_ops.h>
#include <ATen/ops/clone_native.h>
#include <ATen/ops/clone_ops.h>
#include <ATen/ops/resize_as_native.h>
#include <ATen/ops/resize_as_ops.h>
#include <ATen/ops/zero_native.h>
#include <ATen/ops/zero_ops.h>
#include <ATen/ops/heaviside_native.h>
#include <ATen/ops/heaviside_ops.h>
#include <ATen/ops/addmm_native.h>
#include <ATen/ops/addmm_ops.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims_native.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims_ops.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_native.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_ops.h>
#include <ATen/ops/sparse_resize_and_clear_native.h>
#include <ATen/ops/sparse_resize_and_clear_ops.h>
#include <ATen/ops/hspmm_native.h>
#include <ATen/ops/hspmm_ops.h>
#include <ATen/ops/to_sparse_native.h>
#include <ATen/ops/to_sparse_ops.h>
#include <ATen/ops/to_sparse_bsr_native.h>
#include <ATen/ops/to_sparse_bsr_ops.h>
#include <ATen/ops/to_mkldnn_native.h>
#include <ATen/ops/to_mkldnn_ops.h>
#include <ATen/ops/mkldnn_reorder_conv3d_weight_native.h>
#include <ATen/ops/mkldnn_reorder_conv3d_weight_ops.h>
#include <ATen/ops/q_per_channel_scales_native.h>
#include <ATen/ops/q_per_channel_scales_ops.h>
#include <ATen/ops/int_repr_native.h>
#include <ATen/ops/int_repr_ops.h>
#include <ATen/ops/_make_per_tensor_quantized_tensor_native.h>
#include <ATen/ops/_make_per_tensor_quantized_tensor_ops.h>
#include <ATen/ops/_thnn_fused_lstm_cell_native.h>
#include <ATen/ops/_thnn_fused_lstm_cell_ops.h>
#include <ATen/ops/_thnn_fused_lstm_cell_backward_impl_native.h>
#include <ATen/ops/_thnn_fused_lstm_cell_backward_impl_ops.h>
#include <ATen/ops/masked_fill_native.h>
#include <ATen/ops/masked_fill_ops.h>
#include <ATen/ops/_masked_softmax_native.h>
#include <ATen/ops/_masked_softmax_ops.h>
#include <ATen/ops/bitwise_right_shift_native.h>
#include <ATen/ops/bitwise_right_shift_ops.h>
#include <ATen/ops/cauchy_native.h>
#include <ATen/ops/cauchy_ops.h>
#include <ATen/ops/log_normal_native.h>
#include <ATen/ops/log_normal_ops.h>
#include <ATen/ops/diag_native.h>
#include <ATen/ops/diag_ops.h>
#include <ATen/ops/ne_native.h>
#include <ATen/ops/ne_ops.h>
#include <ATen/ops/not_equal_native.h>
#include <ATen/ops/not_equal_ops.h>
#include <ATen/ops/addcmul_native.h>
#include <ATen/ops/addcmul_ops.h>
#include <ATen/ops/ormqr_native.h>
#include <ATen/ops/ormqr_ops.h>
#include <ATen/ops/lu_unpack_native.h>
#include <ATen/ops/lu_unpack_ops.h>
#include <ATen/ops/dist_native.h>
#include <ATen/ops/dist_ops.h>
#include <ATen/ops/arctan2_native.h>
#include <ATen/ops/arctan2_ops.h>
#include <ATen/ops/histc_native.h>
#include <ATen/ops/histc_ops.h>
#include <ATen/ops/_histogramdd_from_bin_cts_native.h>
#include <ATen/ops/_histogramdd_from_bin_cts_ops.h>
#include <ATen/ops/hypot_native.h>
#include <ATen/ops/hypot_ops.h>
#include <ATen/ops/msort_native.h>
#include <ATen/ops/msort_ops.h>
#include <ATen/ops/_foreach_add_native.h>
#include <ATen/ops/_foreach_add_ops.h>
#include <ATen/ops/_foreach_clamp_min_native.h>
#include <ATen/ops/_foreach_clamp_min_ops.h>
#include <ATen/ops/_foreach_zero_native.h>
#include <ATen/ops/_foreach_zero_ops.h>
#include <ATen/ops/_foreach_asin_native.h>
#include <ATen/ops/_foreach_asin_ops.h>
#include <ATen/ops/_foreach_ceil_native.h>
#include <ATen/ops/_foreach_ceil_ops.h>
#include <ATen/ops/_foreach_cosh_native.h>
#include <ATen/ops/_foreach_cosh_ops.h>
#include <ATen/ops/_foreach_log1p_native.h>
#include <ATen/ops/_foreach_log1p_ops.h>
#include <ATen/ops/_foreach_log2_native.h>
#include <ATen/ops/_foreach_log2_ops.h>
#include <ATen/ops/_foreach_round_native.h>
#include <ATen/ops/_foreach_round_ops.h>
#include <ATen/ops/_foreach_addcdiv_native.h>
#include <ATen/ops/_foreach_addcdiv_ops.h>
#include <ATen/ops/_foreach_addcmul_native.h>
#include <ATen/ops/_foreach_addcmul_ops.h>
#include <ATen/ops/bucketize_native.h>
#include <ATen/ops/bucketize_ops.h>
#include <ATen/ops/mse_loss_native.h>
#include <ATen/ops/mse_loss_ops.h>
#include <ATen/ops/multi_margin_loss_native.h>
#include <ATen/ops/multi_margin_loss_ops.h>
#include <ATen/ops/multilabel_margin_loss_native.h>
#include <ATen/ops/multilabel_margin_loss_ops.h>
#include <ATen/ops/multilabel_margin_loss_forward_native.h>
#include <ATen/ops/multilabel_margin_loss_forward_ops.h>
#include <ATen/ops/nll_loss2d_forward_native.h>
#include <ATen/ops/nll_loss2d_forward_ops.h>
#include <ATen/ops/soft_margin_loss_native.h>
#include <ATen/ops/soft_margin_loss_ops.h>
#include <ATen/ops/glu_backward_native.h>
#include <ATen/ops/glu_backward_ops.h>
#include <ATen/ops/glu_backward_jvp_native.h>
#include <ATen/ops/glu_backward_jvp_ops.h>
#include <ATen/ops/hardtanh_native.h>
#include <ATen/ops/hardtanh_ops.h>
#include <ATen/ops/hardtanh_backward_native.h>
#include <ATen/ops/hardtanh_backward_ops.h>
#include <ATen/ops/leaky_relu_native.h>
#include <ATen/ops/leaky_relu_ops.h>
#include <ATen/ops/log_sigmoid_backward_native.h>
#include <ATen/ops/log_sigmoid_backward_ops.h>
#include <ATen/ops/softplus_native.h>
#include <ATen/ops/softplus_ops.h>
#include <ATen/ops/adaptive_avg_pool2d_native.h>
#include <ATen/ops/adaptive_avg_pool2d_ops.h>
#include <ATen/ops/_adaptive_avg_pool3d_backward_native.h>
#include <ATen/ops/_adaptive_avg_pool3d_backward_ops.h>
#include <ATen/ops/adaptive_max_pool2d_backward_native.h>
#include <ATen/ops/adaptive_max_pool2d_backward_ops.h>
#include <ATen/ops/fractional_max_pool2d_backward_native.h>
#include <ATen/ops/fractional_max_pool2d_backward_ops.h>
#include <ATen/ops/fractional_max_pool3d_backward_native.h>
#include <ATen/ops/fractional_max_pool3d_backward_ops.h>
#include <ATen/ops/max_pool3d_with_indices_native.h>
#include <ATen/ops/max_pool3d_with_indices_ops.h>
#include <ATen/ops/max_pool3d_with_indices_backward_native.h>
#include <ATen/ops/max_pool3d_with_indices_backward_ops.h>
#include <ATen/ops/max_unpool2d_native.h>
#include <ATen/ops/max_unpool2d_ops.h>
#include <ATen/ops/reflection_pad2d_backward_native.h>
#include <ATen/ops/reflection_pad2d_backward_ops.h>
#include <ATen/ops/upsample_bilinear2d_native.h>
#include <ATen/ops/upsample_bilinear2d_ops.h>
#include <ATen/ops/upsample_bilinear2d_backward_native.h>
#include <ATen/ops/upsample_bilinear2d_backward_ops.h>
#include <ATen/ops/_upsample_bilinear2d_aa_native.h>
#include <ATen/ops/_upsample_bilinear2d_aa_ops.h>
#include <ATen/ops/upsample_trilinear3d_backward_native.h>
#include <ATen/ops/upsample_trilinear3d_backward_ops.h>
#include <ATen/ops/_upsample_nearest_exact3d_native.h>
#include <ATen/ops/_upsample_nearest_exact3d_ops.h>
#include <ATen/ops/upsample_nearest3d_backward_native.h>
#include <ATen/ops/upsample_nearest3d_backward_ops.h>
#include <ATen/ops/logit_backward_native.h>
#include <ATen/ops/logit_backward_ops.h>
#include <ATen/ops/thnn_conv2d_native.h>
#include <ATen/ops/thnn_conv2d_ops.h>
#include <ATen/ops/_slow_conv2d_backward_native.h>
#include <ATen/ops/_slow_conv2d_backward_ops.h>
#include <ATen/ops/slow_conv3d_native.h>
#include <ATen/ops/slow_conv3d_ops.h>
#include <ATen/ops/slow_conv3d_forward_native.h>
#include <ATen/ops/slow_conv3d_forward_ops.h>
#include <ATen/ops/slow_conv_dilated3d_native.h>
#include <ATen/ops/slow_conv_dilated3d_ops.h>
#include <ATen/ops/special_log_ndtr_native.h>
#include <ATen/ops/special_log_ndtr_ops.h>
#include <ATen/ops/special_exp2_native.h>
#include <ATen/ops/special_exp2_ops.h>
#include <ATen/ops/special_digamma_native.h>
#include <ATen/ops/special_digamma_ops.h>
#include <ATen/ops/special_gammaln_native.h>
#include <ATen/ops/special_gammaln_ops.h>
#include <ATen/ops/special_erfcx_native.h>
#include <ATen/ops/special_erfcx_ops.h>
#include <ATen/ops/special_xlog1py_native.h>
#include <ATen/ops/special_xlog1py_ops.h>
#include <ATen/ops/special_i1_native.h>
#include <ATen/ops/special_i1_ops.h>
#include <ATen/ops/special_i1e_native.h>
#include <ATen/ops/special_i1e_ops.h>
#include <ATen/ops/fft_fft_native.h>
#include <ATen/ops/fft_fft_ops.h>
#include <ATen/ops/fft_rfft_native.h>
#include <ATen/ops/fft_rfft_ops.h>
#include <ATen/ops/fft_hfft_native.h>
#include <ATen/ops/fft_hfft_ops.h>
#include <ATen/ops/fft_hfft2_native.h>
#include <ATen/ops/fft_hfft2_ops.h>
#include <ATen/ops/fft_ifftn_native.h>
#include <ATen/ops/fft_ifftn_ops.h>
#include <ATen/ops/fft_rfftn_native.h>
#include <ATen/ops/fft_rfftn_ops.h>
#include <ATen/ops/fft_hfftn_native.h>
#include <ATen/ops/fft_hfftn_ops.h>
#include <ATen/ops/fft_fftfreq_native.h>
#include <ATen/ops/fft_fftfreq_ops.h>
#include <ATen/ops/linalg_det_native.h>
#include <ATen/ops/linalg_det_ops.h>
#include <ATen/ops/linalg_ldl_factor_ex_native.h>
#include <ATen/ops/linalg_ldl_factor_ex_ops.h>
#include <ATen/ops/linalg_lstsq_native.h>
#include <ATen/ops/linalg_lstsq_ops.h>
#include <ATen/ops/linalg_matrix_exp_native.h>
#include <ATen/ops/linalg_matrix_exp_ops.h>
#include <ATen/ops/linalg_slogdet_native.h>
#include <ATen/ops/linalg_slogdet_ops.h>
#include <ATen/ops/_linalg_eigh_native.h>
#include <ATen/ops/_linalg_eigh_ops.h>
#include <ATen/ops/inner_native.h>
#include <ATen/ops/inner_ops.h>
#include <ATen/ops/linalg_matrix_norm_native.h>
#include <ATen/ops/linalg_matrix_norm_ops.h>
#include <ATen/ops/linalg_tensorinv_native.h>
#include <ATen/ops/linalg_tensorinv_ops.h>
#include <ATen/ops/linalg_matrix_power_native.h>
#include <ATen/ops/linalg_matrix_power_ops.h>
#include <ATen/ops/_make_dual_copy_native.h>
#include <ATen/ops/_make_dual_copy_ops.h>
#include <ATen/ops/view_as_real_copy_native.h>
#include <ATen/ops/view_as_real_copy_ops.h>
#include <ATen/ops/view_as_complex_copy_native.h>
#include <ATen/ops/view_as_complex_copy_ops.h>
#include <ATen/ops/_conj_copy_native.h>
#include <ATen/ops/_conj_copy_ops.h>
#include <ATen/ops/_neg_view_copy_native.h>
#include <ATen/ops/_neg_view_copy_ops.h>
#include <ATen/ops/permute_copy_native.h>
#include <ATen/ops/permute_copy_ops.h>
#include <ATen/ops/split_copy_native.h>
#include <ATen/ops/split_copy_ops.h>
#include <ATen/ops/t_copy_native.h>
#include <ATen/ops/t_copy_ops.h>
#include <ATen/ops/ccol_indices_copy_native.h>
#include <ATen/ops/ccol_indices_copy_ops.h>
#include <ATen/ops/unfold_copy_native.h>
#include <ATen/ops/unfold_copy_ops.h>
#include <ATen/ops/_transformer_encoder_layer_fwd_native.h>
#include <ATen/ops/_transformer_encoder_layer_fwd_ops.h>
#include <ATen/ops/_native_multi_head_attention_native.h>
#include <ATen/ops/_native_multi_head_attention_ops.h>
#include <ATen/ops/_transformer_decoder_only_layer_fwd_native.h>
#include <ATen/ops/_transformer_decoder_only_layer_fwd_ops.h>
#include <ATen/ops/_native_decoder_only_multi_head_attention_native.h>
#include <ATen/ops/_native_decoder_only_multi_head_attention_ops.h>
#include <ATen/ops/special_modified_bessel_i1_native.h>
#include <ATen/ops/special_modified_bessel_i1_ops.h>
#include <ATen/ops/special_modified_bessel_k0_native.h>
#include <ATen/ops/special_modified_bessel_k0_ops.h>
#include <ATen/ops/_foobar_native.h>
#include <ATen/ops/_foobar_ops.h>
#include <ATen/ops/view_as_real_native.h>
#include <ATen/ops/view_as_real_ops.h>
#include <ATen/ops/broadcast_to_native.h>
#include <ATen/ops/broadcast_to_ops.h>
#include <ATen/ops/contiguous_native.h>
#include <ATen/ops/contiguous_ops.h>
#include <ATen/ops/flatten_native.h>
#include <ATen/ops/flatten_ops.h>
#include <ATen/ops/moveaxis_native.h>
#include <ATen/ops/moveaxis_ops.h>
#include <ATen/ops/mT_native.h>
#include <ATen/ops/mT_ops.h>
#include <ATen/ops/ravel_native.h>
#include <ATen/ops/ravel_ops.h>
#include <ATen/ops/reshape_native.h>
#include <ATen/ops/reshape_ops.h>
#include <ATen/ops/slice_native.h>
#include <ATen/ops/slice_ops.h>
#include <ATen/ops/slice_copy_native.h>
#include <ATen/ops/slice_copy_ops.h>
#include <ATen/ops/hsplit_native.h>
#include <ATen/ops/hsplit_ops.h>
#include <ATen/ops/t_native.h>
#include <ATen/ops/t_ops.h>
#include <ATen/ops/coalesce_native.h>
#include <ATen/ops/coalesce_ops.h>
#include <ATen/ops/_indices_native.h>
#include <ATen/ops/_indices_ops.h>
#include <ATen/ops/_indices_copy_native.h>
#include <ATen/ops/_indices_copy_ops.h>
#include <ATen/ops/indices_native.h>
#include <ATen/ops/indices_ops.h>
#include <ATen/ops/indices_copy_native.h>
#include <ATen/ops/indices_copy_ops.h>
#include <ATen/ops/crow_indices_native.h>
#include <ATen/ops/crow_indices_ops.h>
#include <ATen/ops/crow_indices_copy_native.h>
#include <ATen/ops/crow_indices_copy_ops.h>
#include <ATen/ops/col_indices_native.h>
#include <ATen/ops/col_indices_ops.h>
#include <ATen/ops/col_indices_copy_native.h>
#include <ATen/ops/col_indices_copy_ops.h>
#include <ATen/ops/_autocast_to_reduced_precision_native.h>
#include <ATen/ops/_autocast_to_reduced_precision_ops.h>
#include <ATen/ops/swapaxes_native.h>
#include <ATen/ops/swapaxes_ops.h>
#include <ATen/ops/unfold_native.h>
#include <ATen/ops/unfold_ops.h>
#include <ATen/ops/_cast_Byte_native.h>
#include <ATen/ops/_cast_Byte_ops.h>
#include <ATen/ops/_cast_Float_native.h>
#include <ATen/ops/_cast_Float_ops.h>
#include <ATen/ops/is_leaf_native.h>
#include <ATen/ops/is_leaf_ops.h>
#include <ATen/ops/_assert_async_native.h>
#include <ATen/ops/_assert_async_ops.h>
#include <ATen/ops/_use_cudnn_ctc_loss_native.h>
#include <ATen/ops/_use_cudnn_ctc_loss_ops.h>
#include <ATen/ops/_reshape_from_tensor_native.h>
#include <ATen/ops/_reshape_from_tensor_ops.h>
#include <ATen/ops/dropout_native.h>
#include <ATen/ops/dropout_ops.h>
#include <ATen/ops/adaptive_max_pool1d_native.h>
#include <ATen/ops/adaptive_max_pool1d_ops.h>
#include <ATen/ops/_is_any_true_native.h>
#include <ATen/ops/_is_any_true_ops.h>
#include <ATen/ops/atleast_2d_native.h>
#include <ATen/ops/atleast_2d_ops.h>
#include <ATen/ops/_batch_norm_impl_index_backward_native.h>
#include <ATen/ops/_batch_norm_impl_index_backward_ops.h>
#include <ATen/ops/_convolution_double_backward_native.h>
#include <ATen/ops/_convolution_double_backward_ops.h>
#include <ATen/ops/conv1d_native.h>
#include <ATen/ops/conv1d_ops.h>
#include <ATen/ops/corrcoef_native.h>
#include <ATen/ops/corrcoef_ops.h>
#include <ATen/ops/cummaxmin_backward_native.h>
#include <ATen/ops/cummaxmin_backward_ops.h>
#include <ATen/ops/cumprod_backward_native.h>
#include <ATen/ops/cumprod_backward_ops.h>
#include <ATen/ops/embedding_sparse_backward_native.h>
#include <ATen/ops/embedding_sparse_backward_ops.h>
#include <ATen/ops/_rowwise_prune_native.h>
#include <ATen/ops/_rowwise_prune_ops.h>
#include <ATen/ops/_embedding_bag_sparse_backward_native.h>
#include <ATen/ops/_embedding_bag_sparse_backward_ops.h>
#include <ATen/ops/group_norm_native.h>
#include <ATen/ops/group_norm_ops.h>
#include <ATen/ops/_validate_compressed_sparse_indices_native.h>
#include <ATen/ops/_validate_compressed_sparse_indices_ops.h>
#include <ATen/ops/is_distributed_native.h>
#include <ATen/ops/is_distributed_ops.h>
#include <ATen/ops/is_neg_native.h>
#include <ATen/ops/is_neg_ops.h>
#include <ATen/ops/is_same_size_native.h>
#include <ATen/ops/is_same_size_ops.h>
#include <ATen/ops/is_signed_native.h>
#include <ATen/ops/is_signed_ops.h>
#include <ATen/ops/fbgemm_linear_fp16_weight_fp32_activation_native.h>
#include <ATen/ops/fbgemm_linear_fp16_weight_fp32_activation_ops.h>
#include <ATen/ops/matrix_exp_backward_native.h>
#include <ATen/ops/matrix_exp_backward_ops.h>
#include <ATen/ops/max_pool2d_native.h>
#include <ATen/ops/max_pool2d_ops.h>
#include <ATen/ops/miopen_convolution_add_relu_native.h>
#include <ATen/ops/miopen_convolution_add_relu_ops.h>
#include <ATen/ops/cosine_similarity_native.h>
#include <ATen/ops/cosine_similarity_ops.h>
#include <ATen/ops/native_channel_shuffle_native.h>
#include <ATen/ops/native_channel_shuffle_ops.h>
#include <ATen/ops/pinverse_native.h>
#include <ATen/ops/pinverse_ops.h>
#include <ATen/ops/rrelu_native.h>
#include <ATen/ops/rrelu_ops.h>
1357#include <ATen/ops/rrelu_native.h>
1358#include <ATen/ops/rrelu_ops.h>
1359#include <ATen/ops/prelu_native.h>
1360#include <ATen/ops/prelu_ops.h>
1361#include <ATen/ops/softmax_native.h>
1362#include <ATen/ops/softmax_ops.h>
1363#include <ATen/ops/stft_native.h>
1364#include <ATen/ops/stft_ops.h>
1365#include <ATen/ops/stft_native.h>
1366#include <ATen/ops/stft_ops.h>
1367#include <ATen/ops/tile_native.h>
1368#include <ATen/ops/tile_ops.h>
1369#include <ATen/ops/trapezoid_native.h>
1370#include <ATen/ops/trapezoid_ops.h>
1371#include <ATen/ops/trapezoid_native.h>
1372#include <ATen/ops/trapezoid_ops.h>
1373#include <ATen/ops/trapz_native.h>
1374#include <ATen/ops/trapz_ops.h>
1375#include <ATen/ops/trapz_native.h>
1376#include <ATen/ops/trapz_ops.h>
1377#include <ATen/ops/_has_compatible_shallow_copy_type_native.h>
1378#include <ATen/ops/_has_compatible_shallow_copy_type_ops.h>
1379#include <ATen/ops/vander_native.h>
1380#include <ATen/ops/vander_ops.h>
1381#include <ATen/ops/where_native.h>
1382#include <ATen/ops/where_ops.h>
1383#include <ATen/ops/where_native.h>
1384#include <ATen/ops/where_ops.h>
1385#include <ATen/ops/where_native.h>
1386#include <ATen/ops/where_ops.h>
1387#include <ATen/ops/where_native.h>
1388#include <ATen/ops/where_ops.h>
1389#include <ATen/ops/_weight_norm_native.h>
1390#include <ATen/ops/_weight_norm_ops.h>
1391#include <ATen/ops/_weight_norm_differentiable_backward_native.h>
1392#include <ATen/ops/_weight_norm_differentiable_backward_ops.h>
1393#include <ATen/ops/_sparse_sum_native.h>
1394#include <ATen/ops/_sparse_sum_ops.h>
1395#include <ATen/ops/_sparse_sum_native.h>
1396#include <ATen/ops/_sparse_sum_ops.h>
1397#include <ATen/ops/_sparse_sum_native.h>
1398#include <ATen/ops/_sparse_sum_ops.h>
1399#include <ATen/ops/_sparse_softmax_native.h>
1400#include <ATen/ops/_sparse_softmax_ops.h>
1401#include <ATen/ops/_sparse_softmax_native.h>
1402#include <ATen/ops/_sparse_softmax_ops.h>
1403#include <ATen/ops/_sparse_mm_reduce_impl_native.h>
1404#include <ATen/ops/_sparse_mm_reduce_impl_ops.h>
1405#include <ATen/ops/sparse_csr_tensor_native.h>
1406#include <ATen/ops/sparse_csr_tensor_ops.h>
1407#include <ATen/ops/sparse_bsc_tensor_native.h>
1408#include <ATen/ops/sparse_bsc_tensor_ops.h>
1409#include <ATen/ops/sparse_csr_tensor_native.h>
1410#include <ATen/ops/sparse_csr_tensor_ops.h>
1411#include <ATen/ops/sparse_bsc_tensor_native.h>
1412#include <ATen/ops/sparse_bsc_tensor_ops.h>
1413#include <ATen/ops/_sparse_csr_tensor_unsafe_native.h>
1414#include <ATen/ops/_sparse_csr_tensor_unsafe_ops.h>
1415#include <ATen/ops/_sparse_bsc_tensor_unsafe_native.h>
1416#include <ATen/ops/_sparse_bsc_tensor_unsafe_ops.h>
1417#include <ATen/ops/_validate_sparse_bsr_tensor_args_native.h>
1418#include <ATen/ops/_validate_sparse_bsr_tensor_args_ops.h>
1419#include <ATen/ops/sparse_dim_native.h>
1420#include <ATen/ops/sparse_dim_ops.h>
1421#include <ATen/ops/is_coalesced_native.h>
1422#include <ATen/ops/is_coalesced_ops.h>
1423#include <ATen/ops/q_scale_native.h>
1424#include <ATen/ops/q_scale_ops.h>
1425#include <ATen/ops/_saturate_weight_to_fp16_native.h>
1426#include <ATen/ops/_saturate_weight_to_fp16_ops.h>
1427#include <ATen/ops/item_native.h>
1428#include <ATen/ops/item_ops.h>
1429#include <ATen/ops/_thnn_fused_lstm_cell_backward_native.h>
1430#include <ATen/ops/_thnn_fused_lstm_cell_backward_ops.h>
1431#include <ATen/ops/_thnn_differentiable_lstm_cell_backward_native.h>
1432#include <ATen/ops/_thnn_differentiable_lstm_cell_backward_ops.h>
1433#include <ATen/ops/rnn_relu_native.h>
1434#include <ATen/ops/rnn_relu_ops.h>
1435#include <ATen/ops/rnn_relu_native.h>
1436#include <ATen/ops/rnn_relu_ops.h>
1437#include <ATen/ops/quantized_lstm_cell_native.h>
1438#include <ATen/ops/quantized_lstm_cell_ops.h>
1439#include <ATen/ops/quantized_rnn_relu_cell_native.h>
1440#include <ATen/ops/quantized_rnn_relu_cell_ops.h>
1441#include <ATen/ops/_pack_padded_sequence_backward_native.h>
1442#include <ATen/ops/_pack_padded_sequence_backward_ops.h>
1443#include <ATen/ops/histogramdd_native.h>
1444#include <ATen/ops/histogramdd_ops.h>
1445#include <ATen/ops/histogramdd_native.h>
1446#include <ATen/ops/histogramdd_ops.h>
1447#include <ATen/ops/histogramdd_native.h>
1448#include <ATen/ops/histogramdd_ops.h>
1449#include <ATen/ops/min_native.h>
1450#include <ATen/ops/min_ops.h>
1451#include <ATen/ops/l1_loss_native.h>
1452#include <ATen/ops/l1_loss_ops.h>
1453#include <ATen/ops/upsample_bilinear2d_native.h>
1454#include <ATen/ops/upsample_bilinear2d_ops.h>
1455#include <ATen/ops/_upsample_bilinear2d_aa_native.h>
1456#include <ATen/ops/_upsample_bilinear2d_aa_ops.h>
1457#include <ATen/ops/_upsample_nearest_exact3d_native.h>
1458#include <ATen/ops/_upsample_nearest_exact3d_ops.h>
1459#include <ATen/ops/_slow_conv2d_backward_native.h>
1460#include <ATen/ops/_slow_conv2d_backward_ops.h>
1461#include <ATen/ops/isfinite_native.h>
1462#include <ATen/ops/isfinite_ops.h>
1463#include <ATen/ops/special_log_softmax_native.h>
1464#include <ATen/ops/special_log_softmax_ops.h>
1465#include <ATen/ops/_nested_tensor_softmax_with_shape_native.h>
1466#include <ATen/ops/_nested_tensor_softmax_with_shape_ops.h>
1467#include <ATen/ops/scaled_dot_product_attention_native.h>
1468#include <ATen/ops/scaled_dot_product_attention_ops.h>
1469#include <ATen/ops/_fused_sdp_choice_native.h>
1470#include <ATen/ops/_fused_sdp_choice_ops.h>
1471#include <ATen/ops/_scaled_dot_product_attention_math_native.h>
1472#include <ATen/ops/_scaled_dot_product_attention_math_ops.h>
1473#endif
1474
1475namespace at {
1476namespace functionalization {
1477
1478// This keyset is used by functionalization when it calls into meta kernels
1479// to accurately propagate stride metadata.
1480// Exclude any modes: the purpose of calling into meta kernels is only as an implementation
1481// detail to perform shape inference, and we don't want any modal keys to run.
1482// Specifically, we want to prevent functionalization and Python modes from running.
1483constexpr auto exclude_keys_for_meta_dispatch =
1484 c10::functorch_transforms_ks |
1485 c10::DispatchKeySet({
1486 c10::DispatchKey::FuncTorchDynamicLayerBackMode,
1487 c10::DispatchKey::FuncTorchDynamicLayerFrontMode,
1488 c10::DispatchKey::Python
1489 });
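// The generated kernels below consume this keyset via c10::impl::ExcludeDispatchKeyGuard
// immediately before replaying an op on meta tensors, e.g.:
//   at::AutoDispatchSkipFunctionalize func_guard;
//   c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
//   at::_ops::abs_::call(self_meta);
// so that the meta call performs shape/stride inference only, with no mode-based dispatch.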
1490
1491
1492inline Tensor to_meta(const Tensor& t) {
1493 if (!t.defined()) return t;
1494 return at::native::empty_strided_meta_symint(t.sym_sizes(), t.sym_strides(),
1495/*dtype=*/c10::make_optional(t.scalar_type()), /*layout=*/c10::make_optional(t.layout()),
1496/*device=*/c10::make_optional(c10::Device(kMeta)), /*pin_memory=*/c10::nullopt);
1497}
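// Note: to_meta() mirrors metadata only. It allocates an uninitialized tensor on the Meta
// device with the same symbolic sizes, strides, dtype, and layout as the input; the
// input's data is never read or copied.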
1498
1499inline c10::optional<Tensor> to_meta(const c10::optional<Tensor>& t) {
1500 if (t.has_value()) {
1501 return c10::make_optional<Tensor>(to_meta(*t));
1502 }
1503 return c10::nullopt;
1504}
1505
1506inline std::vector<Tensor> to_meta(at::ITensorListRef t_list) {
1507 std::vector<Tensor> outputs;
1508 outputs.reserve(t_list.size());
1509 for (const auto& tensor : t_list) {
1510 outputs.push_back(to_meta(tensor));
1511 }
1512 return outputs;
1513}
1514
1515inline c10::List<Tensor> to_meta(const c10::List<Tensor>& t_list) {
1516 c10::List<Tensor> outputs;
1517 outputs.reserve(t_list.size());
1518 for (const auto i : c10::irange(t_list.size())) {
1519 outputs.push_back(to_meta(t_list[i]));
1520 }
1521 return outputs;
1522}
1523
1524inline c10::List<c10::optional<Tensor>> to_meta(const c10::List<c10::optional<Tensor>>& t_list) {
1525 c10::List<c10::optional<Tensor>> outputs;
1526 outputs.reserve(t_list.size());
1527 for (const auto i : c10::irange(t_list.size())) {
1528 outputs.push_back(to_meta(t_list[i]));
1529 }
1530 return outputs;
1531}
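// The overloads above lift to_meta() element-wise over optional and list arguments, so the
// generated kernels can convert any tensor-like argument with a single to_meta(...) call.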
1532
1533
1534
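// Each generated out= kernel below follows the same shape:
//   1. optionally replay the call on meta tensors to surface shape errors early;
//   2. sync and unwrap every functional tensor argument;
//   3. if `out` is not a functional tensor, either raise an internal assert (a functional
//      input would be mutating a non-functional output) or skip functionalization and
//      redispatch to the original out= op;
//   4. otherwise, call the functional variant of the op and write the result back into
//      `out` via replace_ / commit_update / sync.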
1535 at::Tensor & _cudnn_rnn_flatten_weight_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out) {
1536 if (false) {
1537 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
1538 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
1539 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
1540 auto weight_arr_meta = to_meta(weight_arr);
1541 auto out_meta = to_meta(out);
1542 at::AutoDispatchSkipFunctionalize func_guard;
1543 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
1544 at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr_meta, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out_meta);
1545 }
1546
1547 ::std::vector<at::Tensor> weight_arr_;
1548 if (at::functionalization::impl::isFunctionalTensor(weight_arr)) {
1549 at::functionalization::impl::sync(weight_arr);
1550 weight_arr_ = at::functionalization::impl::from_functional_tensor(weight_arr);
1551 } else {
1552 weight_arr_ = weight_arr.vec();
1553 }
1554
1555 at::Tensor out_;
1556 if (at::functionalization::impl::isFunctionalTensor(out)) {
1557 at::functionalization::impl::sync(out);
1558 out_ = at::functionalization::impl::from_functional_tensor(out);
1559 } else {
1560 out_ = out;
1561 }
1562 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
1563 if ((false || at::functionalization::impl::isFunctionalTensor(weight_arr))) {
1564 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
1565 TORCH_INTERNAL_ASSERT(false,
1566 "mutating a non-functional tensor with a functional tensor is not allowed.",
1567 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
1568 } else {
1569 // case 2: arguments are not functional tensors, so we no-op and redispatch.
1570 at::AutoDispatchSkipFunctionalize guard;
1571 at::Tensor tmp_output = at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr_, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out_);
1572 return out;
1573 }
1574 } else {
1575 at::Tensor tmp_output;
1576 {
1577 at::AutoDispatchSkipFunctionalize guard;
1578 tmp_output = at::_ops::_cudnn_rnn_flatten_weight::call(weight_arr_, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional);
1579 }
1580 at::functionalization::impl::replace_(out, tmp_output);
1581 at::functionalization::impl::commit_update(out);
1582 at::functionalization::impl::sync(out);
1583 return out;
1584 }
1585 }
1586
1587 at::Tensor & abs_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
1588 if (false) {
1589 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
1590 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
1591 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
1592 auto self_meta = to_meta(self);
1593 auto out_meta = to_meta(out);
1594 at::AutoDispatchSkipFunctionalize func_guard;
1595 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
1596 at::_ops::abs_out::call(self_meta, out_meta);
1597 }
1598
1599 at::Tensor self_;
1600 if (at::functionalization::impl::isFunctionalTensor(self)) {
1601 at::functionalization::impl::sync(self);
1602 self_ = at::functionalization::impl::from_functional_tensor(self);
1603 } else {
1604 self_ = self;
1605 }
1606
1607 at::Tensor out_;
1608 if (at::functionalization::impl::isFunctionalTensor(out)) {
1609 at::functionalization::impl::sync(out);
1610 out_ = at::functionalization::impl::from_functional_tensor(out);
1611 } else {
1612 out_ = out;
1613 }
1614 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
1615 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
1616 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
1617 TORCH_INTERNAL_ASSERT(false,
1618 "mutating a non-functional tensor with a functional tensor is not allowed.",
1619 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
1620 } else {
1621 // case 2: arguments are not functional tensors, so we no-op and redispatch.
1622 at::AutoDispatchSkipFunctionalize guard;
1623 at::Tensor tmp_output = at::_ops::abs_out::call(self_, out_);
1624 return out;
1625 }
1626 } else {
1627 at::Tensor tmp_output;
1628 {
1629 at::AutoDispatchSkipFunctionalize guard;
1630 tmp_output = at::_ops::abs::call(self_);
1631 }
1632 at::functionalization::impl::replace_(out, tmp_output);
1633 at::functionalization::impl::commit_update(out);
1634 at::functionalization::impl::sync(out);
1635 return out;
1636 }
1637 }
1638
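// In-place variants (abs_, acos_, etc.) follow the same structure, except that `self` is
// both the input and the mutated output: when `self` is a functional tensor, the functional
// op is invoked on the unwrapped value and the result is committed back into `self`;
// otherwise the in-place op is redispatched directly.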
1639 at::Tensor & abs_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
1640 if (true) {
1641 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
1642 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
1643 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
1644 auto self_meta = to_meta(self);
1645 at::AutoDispatchSkipFunctionalize func_guard;
1646 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
1647 at::_ops::abs_::call(self_meta);
1648 }
1649
1650 at::Tensor self_;
1651 if (at::functionalization::impl::isFunctionalTensor(self)) {
1652 at::functionalization::impl::sync(self);
1653 self_ = at::functionalization::impl::from_functional_tensor(self);
1654 } else {
1655 self_ = self;
1656 }
1657 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
1658 if ((false)) {
1659 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
1660 TORCH_INTERNAL_ASSERT(false,
1661 "mutating a non-functional tensor with a functional tensor is not allowed.",
1662 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
1663 } else {
1664 // case 2: arguments are not functional tensors, so we no-op and redispatch.
1665 at::AutoDispatchSkipFunctionalize guard;
1666 at::Tensor tmp_output = at::_ops::abs_::call(self_);
1667 return self;
1668 }
1669 } else {
1670 at::Tensor tmp_output;
1671 {
1672 at::AutoDispatchSkipFunctionalize guard;
1673 tmp_output = at::_ops::abs::call(self_);
1674 }
1675 at::functionalization::impl::replace_(self, tmp_output);
1676 at::functionalization::impl::commit_update(self);
1677 at::functionalization::impl::sync(self);
1678 return self;
1679 }
1680 }
1681
1682 at::Tensor & absolute_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
1683 if (false) {
1684 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
1685 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
1686 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
1687 auto self_meta = to_meta(self);
1688 auto out_meta = to_meta(out);
1689 at::AutoDispatchSkipFunctionalize func_guard;
1690 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
1691 at::_ops::absolute_out::call(self_meta, out_meta);
1692 }
1693
1694 at::Tensor self_;
1695 if (at::functionalization::impl::isFunctionalTensor(self)) {
1696 at::functionalization::impl::sync(self);
1697 self_ = at::functionalization::impl::from_functional_tensor(self);
1698 } else {
1699 self_ = self;
1700 }
1701
1702 at::Tensor out_;
1703 if (at::functionalization::impl::isFunctionalTensor(out)) {
1704 at::functionalization::impl::sync(out);
1705 out_ = at::functionalization::impl::from_functional_tensor(out);
1706 } else {
1707 out_ = out;
1708 }
1709 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
1710 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
1711 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
1712 TORCH_INTERNAL_ASSERT(false,
1713 "mutating a non-functional tensor with a functional tensor is not allowed.",
1714 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
1715 } else {
1716 // case 2: arguments are not functional tensors, so we no-op and redispatch.
1717 at::AutoDispatchSkipFunctionalize guard;
1718 at::Tensor tmp_output = at::_ops::absolute_out::call(self_, out_);
1719 return out;
1720 }
1721 } else {
1722 at::Tensor tmp_output;
1723 {
1724 at::AutoDispatchSkipFunctionalize guard;
1725 tmp_output = at::_ops::absolute::call(self_);
1726 }
1727 at::functionalization::impl::replace_(out, tmp_output);
1728 at::functionalization::impl::commit_update(out);
1729 at::functionalization::impl::sync(out);
1730 return out;
1731 }
1732 }
1733
1734 at::Tensor & absolute_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
1735 if (true) {
1736 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
1737 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
1738 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
1739 auto self_meta = to_meta(self);
1740 at::AutoDispatchSkipFunctionalize func_guard;
1741 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
1742 at::_ops::absolute_::call(self_meta);
1743 }
1744
1745 at::Tensor self_;
1746 if (at::functionalization::impl::isFunctionalTensor(self)) {
1747 at::functionalization::impl::sync(self);
1748 self_ = at::functionalization::impl::from_functional_tensor(self);
1749 } else {
1750 self_ = self;
1751 }
1752 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
1753 if ((false)) {
1754 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
1755 TORCH_INTERNAL_ASSERT(false,
1756 "mutating a non-functional tensor with a functional tensor is not allowed.",
1757 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
1758 } else {
1759 // case 2: arguments are not functional tensors, so we no-op and redispatch.
1760 at::AutoDispatchSkipFunctionalize guard;
1761 at::Tensor tmp_output = at::_ops::absolute_::call(self_);
1762 return self;
1763 }
1764 } else {
1765 at::Tensor tmp_output;
1766 {
1767 at::AutoDispatchSkipFunctionalize guard;
1768 tmp_output = at::_ops::absolute::call(self_);
1769 }
1770 at::functionalization::impl::replace_(self, tmp_output);
1771 at::functionalization::impl::commit_update(self);
1772 at::functionalization::impl::sync(self);
1773 return self;
1774 }
1775 }
1776
1777 at::Tensor & _conj_physical_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
1778 if (false) {
1779 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
1780 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
1781 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
1782 auto self_meta = to_meta(self);
1783 auto out_meta = to_meta(out);
1784 at::AutoDispatchSkipFunctionalize func_guard;
1785 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
1786 at::_ops::_conj_physical_out::call(self_meta, out_meta);
1787 }
1788
1789 at::Tensor self_;
1790 if (at::functionalization::impl::isFunctionalTensor(self)) {
1791 at::functionalization::impl::sync(self);
1792 self_ = at::functionalization::impl::from_functional_tensor(self);
1793 } else {
1794 self_ = self;
1795 }
1796
1797 at::Tensor out_;
1798 if (at::functionalization::impl::isFunctionalTensor(out)) {
1799 at::functionalization::impl::sync(out);
1800 out_ = at::functionalization::impl::from_functional_tensor(out);
1801 } else {
1802 out_ = out;
1803 }
1804 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
1805 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
1806 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
1807 TORCH_INTERNAL_ASSERT(false,
1808 "mutating a non-functional tensor with a functional tensor is not allowed.",
1809 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
1810 } else {
1811 // case 2: arguments are not functional tensors, so we no-op and redispatch.
1812 at::AutoDispatchSkipFunctionalize guard;
1813 at::Tensor tmp_output = at::_ops::_conj_physical_out::call(self_, out_);
1814 return out;
1815 }
1816 } else {
1817 at::Tensor tmp_output;
1818 {
1819 at::AutoDispatchSkipFunctionalize guard;
1820 tmp_output = at::_ops::_conj_physical::call(self_);
1821 }
1822 at::functionalization::impl::replace_(out, tmp_output);
1823 at::functionalization::impl::commit_update(out);
1824 at::functionalization::impl::sync(out);
1825 return out;
1826 }
1827 }
1828
1829 at::Tensor & acos_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
1830 if (false) {
1831 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
1832 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
1833 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
1834 auto self_meta = to_meta(self);
1835 auto out_meta = to_meta(out);
1836 at::AutoDispatchSkipFunctionalize func_guard;
1837 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
1838 at::_ops::acos_out::call(self_meta, out_meta);
1839 }
1840
1841 at::Tensor self_;
1842 if (at::functionalization::impl::isFunctionalTensor(self)) {
1843 at::functionalization::impl::sync(self);
1844 self_ = at::functionalization::impl::from_functional_tensor(self);
1845 } else {
1846 self_ = self;
1847 }
1848
1849 at::Tensor out_;
1850 if (at::functionalization::impl::isFunctionalTensor(out)) {
1851 at::functionalization::impl::sync(out);
1852 out_ = at::functionalization::impl::from_functional_tensor(out);
1853 } else {
1854 out_ = out;
1855 }
1856 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
1857 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
1858 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
1859 TORCH_INTERNAL_ASSERT(false,
1860 "mutating a non-functional tensor with a functional tensor is not allowed.",
1861 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
1862 } else {
1863 // case 2: arguments are not functional tensors, so we no-op and redispatch.
1864 at::AutoDispatchSkipFunctionalize guard;
1865 at::Tensor tmp_output = at::_ops::acos_out::call(self_, out_);
1866 return out;
1867 }
1868 } else {
1869 at::Tensor tmp_output;
1870 {
1871 at::AutoDispatchSkipFunctionalize guard;
1872 tmp_output = at::_ops::acos::call(self_);
1873 }
1874 at::functionalization::impl::replace_(out, tmp_output);
1875 at::functionalization::impl::commit_update(out);
1876 at::functionalization::impl::sync(out);
1877 return out;
1878 }
1879 }
1880
1881 at::Tensor & acos_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
1882 if (true) {
1883 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
1884 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
1885 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
1886 auto self_meta = to_meta(self);
1887 at::AutoDispatchSkipFunctionalize func_guard;
1888 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
1889 at::_ops::acos_::call(self_meta);
1890 }
1891
1892 at::Tensor self_;
1893 if (at::functionalization::impl::isFunctionalTensor(self)) {
1894 at::functionalization::impl::sync(self);
1895 self_ = at::functionalization::impl::from_functional_tensor(self);
1896 } else {
1897 self_ = self;
1898 }
1899 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
1900 if ((false)) {
1901 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
1902 TORCH_INTERNAL_ASSERT(false,
1903 "mutating a non-functional tensor with a functional tensor is not allowed.",
1904 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
1905 } else {
1906 // case 2: arguments are not functional tensors, so we no-op and redispatch.
1907 at::AutoDispatchSkipFunctionalize guard;
1908 at::Tensor tmp_output = at::_ops::acos_::call(self_);
1909 return self;
1910 }
1911 } else {
1912 at::Tensor tmp_output;
1913 {
1914 at::AutoDispatchSkipFunctionalize guard;
1915 tmp_output = at::_ops::acos::call(self_);
1916 }
1917 at::functionalization::impl::replace_(self, tmp_output);
1918 at::functionalization::impl::commit_update(self);
1919 at::functionalization::impl::sync(self);
1920 return self;
1921 }
1922 }
1923
1924 at::Tensor & arccos_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
1925 if (false) {
1926 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
1927 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
1928 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
1929 auto self_meta = to_meta(self);
1930 auto out_meta = to_meta(out);
1931 at::AutoDispatchSkipFunctionalize func_guard;
1932 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
1933 at::_ops::arccos_out::call(self_meta, out_meta);
1934 }
1935
1936 at::Tensor self_;
1937 if (at::functionalization::impl::isFunctionalTensor(self)) {
1938 at::functionalization::impl::sync(self);
1939 self_ = at::functionalization::impl::from_functional_tensor(self);
1940 } else {
1941 self_ = self;
1942 }
1943
1944 at::Tensor out_;
1945 if (at::functionalization::impl::isFunctionalTensor(out)) {
1946 at::functionalization::impl::sync(out);
1947 out_ = at::functionalization::impl::from_functional_tensor(out);
1948 } else {
1949 out_ = out;
1950 }
1951 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
1952 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
1953 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
1954 TORCH_INTERNAL_ASSERT(false,
1955 "mutating a non-functional tensor with a functional tensor is not allowed.",
1956 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
1957 } else {
1958 // case 2: arguments are not functional tensors, so we no-op and redispatch.
1959 at::AutoDispatchSkipFunctionalize guard;
1960 at::Tensor tmp_output = at::_ops::arccos_out::call(self_, out_);
1961 return out;
1962 }
1963 } else {
1964 at::Tensor tmp_output;
1965 {
1966 at::AutoDispatchSkipFunctionalize guard;
1967 tmp_output = at::_ops::arccos::call(self_);
1968 }
1969 at::functionalization::impl::replace_(out, tmp_output);
1970 at::functionalization::impl::commit_update(out);
1971 at::functionalization::impl::sync(out);
1972 return out;
1973 }
1974 }
1975
1976 at::Tensor & arccos_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
1977 if (true) {
1978 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
1979 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
1980 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
1981 auto self_meta = to_meta(self);
1982 at::AutoDispatchSkipFunctionalize func_guard;
1983 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
1984 at::_ops::arccos_::call(self_meta);
1985 }
1986
1987 at::Tensor self_;
1988 if (at::functionalization::impl::isFunctionalTensor(self)) {
1989 at::functionalization::impl::sync(self);
1990 self_ = at::functionalization::impl::from_functional_tensor(self);
1991 } else {
1992 self_ = self;
1993 }
1994 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
1995 if ((false)) {
1996 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
1997 TORCH_INTERNAL_ASSERT(false,
1998 "mutating a non-functional tensor with a functional tensor is not allowed.",
1999 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2000 } else {
2001 // case 2: arguments are not functional tensors, so we no-op and redispatch.
2002 at::AutoDispatchSkipFunctionalize guard;
2003 at::Tensor tmp_output = at::_ops::arccos_::call(self_);
2004 return self;
2005 }
2006 } else {
2007 at::Tensor tmp_output;
2008 {
2009 at::AutoDispatchSkipFunctionalize guard;
2010 tmp_output = at::_ops::arccos::call(self_);
2011 }
2012 at::functionalization::impl::replace_(self, tmp_output);
2013 at::functionalization::impl::commit_update(self);
2014 at::functionalization::impl::sync(self);
2015 return self;
2016 }
2017 }
2018
2019 at::Tensor & affine_grid_generator_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & theta, at::IntArrayRef size, bool align_corners, at::Tensor & out) {
2020 if (false) {
2021 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
2022 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
2023 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2024 auto theta_meta = to_meta(theta);
2025 auto out_meta = to_meta(out);
2026 at::AutoDispatchSkipFunctionalize func_guard;
2027 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
2028 at::_ops::affine_grid_generator_out::call(theta_meta, size, align_corners, out_meta);
2029 }
2030
2031 at::Tensor theta_;
2032 if (at::functionalization::impl::isFunctionalTensor(theta)) {
2033 at::functionalization::impl::sync(theta);
2034 theta_ = at::functionalization::impl::from_functional_tensor(theta);
2035 } else {
2036 theta_ = theta;
2037 }
2038
2039 at::Tensor out_;
2040 if (at::functionalization::impl::isFunctionalTensor(out)) {
2041 at::functionalization::impl::sync(out);
2042 out_ = at::functionalization::impl::from_functional_tensor(out);
2043 } else {
2044 out_ = out;
2045 }
2046 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
2047 if ((false || at::functionalization::impl::isFunctionalTensor(theta))) {
2048 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
2049 TORCH_INTERNAL_ASSERT(false,
2050 "mutating a non-functional tensor with a functional tensor is not allowed.",
2051 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2052 } else {
2053 // case 2: arguments are not functional tensors, so we no-op and redispatch.
2054 at::AutoDispatchSkipFunctionalize guard;
2055 at::Tensor tmp_output = at::_ops::affine_grid_generator_out::call(theta_, size, align_corners, out_);
2056 return out;
2057 }
2058 } else {
2059 at::Tensor tmp_output;
2060 {
2061 at::AutoDispatchSkipFunctionalize guard;
2062 tmp_output = at::_ops::affine_grid_generator::call(theta_, size, align_corners);
2063 }
2064 at::functionalization::impl::replace_(out, tmp_output);
2065 at::functionalization::impl::commit_update(out);
2066 at::functionalization::impl::sync(out);
2067 return out;
2068 }
2069 }
2070
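// Factory-style out= ops such as arange.out take no tensor inputs, so when `out` is a
// functional tensor the functional variant is invoked with the dtype, layout, and device
// recovered from the existing `out` tensor (see the at::_ops::arange::call below).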
2071 at::Tensor & arange_out_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & end, at::Tensor & out) {
2072 if (false) {
2073 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
2074 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
2075 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2076 auto out_meta = to_meta(out);
2077 at::AutoDispatchSkipFunctionalize func_guard;
2078 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
2079 at::_ops::arange_out::call(end, out_meta);
2080 }
2081
2082 at::Tensor out_;
2083 if (at::functionalization::impl::isFunctionalTensor(out)) {
2084 at::functionalization::impl::sync(out);
2085 out_ = at::functionalization::impl::from_functional_tensor(out);
2086 } else {
2087 out_ = out;
2088 }
2089 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
2090 if ((false)) {
2091 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
2092 TORCH_INTERNAL_ASSERT(false,
2093 "mutating a non-functional tensor with a functional tensor is not allowed.",
2094 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2095 } else {
2096 // case 2: arguments are not functional tensors, so we no-op and redispatch.
2097 at::AutoDispatchSkipFunctionalize guard;
2098 at::Tensor tmp_output = at::_ops::arange_out::call(end, out_);
2099 return out;
2100 }
2101 } else {
2102 at::Tensor tmp_output;
2103 {
2104 at::AutoDispatchSkipFunctionalize guard;
2105 tmp_output = at::_ops::arange::call(end, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt);
2106 }
2107 at::functionalization::impl::replace_(out, tmp_output);
2108 at::functionalization::impl::commit_update(out);
2109 at::functionalization::impl::sync(out);
2110 return out;
2111 }
2112 }
2113
2114 at::Tensor & arange_out_start_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out) {
2115 if (false) {
2116 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
2117 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
2118 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2119 auto out_meta = to_meta(out);
2120 at::AutoDispatchSkipFunctionalize func_guard;
2121 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
2122 at::_ops::arange_start_out::call(start, end, step, out_meta);
2123 }
2124
2125 at::Tensor out_;
2126 if (at::functionalization::impl::isFunctionalTensor(out)) {
2127 at::functionalization::impl::sync(out);
2128 out_ = at::functionalization::impl::from_functional_tensor(out);
2129 } else {
2130 out_ = out;
2131 }
2132 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
2133 if ((false)) {
2134 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
2135 TORCH_INTERNAL_ASSERT(false,
2136 "mutating a non-functional tensor with a functional tensor is not allowed.",
2137 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2138 } else {
2139 // case 2: arguments are not functional tensors, so we no-op and redispatch.
2140 at::AutoDispatchSkipFunctionalize guard;
2141 at::Tensor tmp_output = at::_ops::arange_start_out::call(start, end, step, out_);
2142 return out;
2143 }
2144 } else {
2145 at::Tensor tmp_output;
2146 {
2147 at::AutoDispatchSkipFunctionalize guard;
2148 tmp_output = at::_ops::arange_start_step::call(start, end, step, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt);
2149 }
2150 at::functionalization::impl::replace_(out, tmp_output);
2151 at::functionalization::impl::commit_update(out);
2152 at::functionalization::impl::sync(out);
2153 return out;
2154 }
2155 }
2156
2157 at::Tensor & arccosh_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
2158 if (false) {
2159 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
2160 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
2161 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2162 auto self_meta = to_meta(self);
2163 auto out_meta = to_meta(out);
2164 at::AutoDispatchSkipFunctionalize func_guard;
2165 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
2166 at::_ops::arccosh_out::call(self_meta, out_meta);
2167 }
2168
2169 at::Tensor self_;
2170 if (at::functionalization::impl::isFunctionalTensor(self)) {
2171 at::functionalization::impl::sync(self);
2172 self_ = at::functionalization::impl::from_functional_tensor(self);
2173 } else {
2174 self_ = self;
2175 }
2176
2177 at::Tensor out_;
2178 if (at::functionalization::impl::isFunctionalTensor(out)) {
2179 at::functionalization::impl::sync(out);
2180 out_ = at::functionalization::impl::from_functional_tensor(out);
2181 } else {
2182 out_ = out;
2183 }
2184 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
2185 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
2186 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
2187 TORCH_INTERNAL_ASSERT(false,
2188 "mutating a non-functional tensor with a functional tensor is not allowed.",
2189 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2190 } else {
2191 // case 2: arguments are not functional tensors, so we no-op and redispatch.
2192 at::AutoDispatchSkipFunctionalize guard;
2193 at::Tensor tmp_output = at::_ops::arccosh_out::call(self_, out_);
2194 return out;
2195 }
2196 } else {
2197 at::Tensor tmp_output;
2198 {
2199 at::AutoDispatchSkipFunctionalize guard;
2200 tmp_output = at::_ops::arccosh::call(self_);
2201 }
2202 at::functionalization::impl::replace_(out, tmp_output);
2203 at::functionalization::impl::commit_update(out);
2204 at::functionalization::impl::sync(out);
2205 return out;
2206 }
2207 }
2208
2209 at::Tensor & arccosh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
2210 if (true) {
2211 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
2212 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
2213 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2214 auto self_meta = to_meta(self);
2215 at::AutoDispatchSkipFunctionalize func_guard;
2216 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
2217 at::_ops::arccosh_::call(self_meta);
2218 }
2219
2220 at::Tensor self_;
2221 if (at::functionalization::impl::isFunctionalTensor(self)) {
2222 at::functionalization::impl::sync(self);
2223 self_ = at::functionalization::impl::from_functional_tensor(self);
2224 } else {
2225 self_ = self;
2226 }
2227 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
2228 if ((false)) {
2229 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
2230 TORCH_INTERNAL_ASSERT(false,
2231 "mutating a non-functional tensor with a functional tensor is not allowed.",
2232 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2233 } else {
2234 // case 2: arguments are not functional tensors, so we no-op and redispatch.
2235 at::AutoDispatchSkipFunctionalize guard;
2236 at::Tensor tmp_output = at::_ops::arccosh_::call(self_);
2237 return self;
2238 }
2239 } else {
2240 at::Tensor tmp_output;
2241 {
2242 at::AutoDispatchSkipFunctionalize guard;
2243 tmp_output = at::_ops::arccosh::call(self_);
2244 }
2245 at::functionalization::impl::replace_(self, tmp_output);
2246 at::functionalization::impl::commit_update(self);
2247 at::functionalization::impl::sync(self);
2248 return self;
2249 }
2250 }
2251
2252 at::Tensor & arcsinh_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
2253 if (false) {
2254 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
2255 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
2256 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2257 auto self_meta = to_meta(self);
2258 auto out_meta = to_meta(out);
2259 at::AutoDispatchSkipFunctionalize func_guard;
2260 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
2261 at::_ops::arcsinh_out::call(self_meta, out_meta);
2262 }
2263
2264 at::Tensor self_;
2265 if (at::functionalization::impl::isFunctionalTensor(self)) {
2266 at::functionalization::impl::sync(self);
2267 self_ = at::functionalization::impl::from_functional_tensor(self);
2268 } else {
2269 self_ = self;
2270 }
2271
2272 at::Tensor out_;
2273 if (at::functionalization::impl::isFunctionalTensor(out)) {
2274 at::functionalization::impl::sync(out);
2275 out_ = at::functionalization::impl::from_functional_tensor(out);
2276 } else {
2277 out_ = out;
2278 }
2279 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
2280 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
2281 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
2282 TORCH_INTERNAL_ASSERT(false,
2283 "mutating a non-functional tensor with a functional tensor is not allowed.",
2284 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2285 } else {
2286 // case 2: arguments are not functional tensors, so we no-op and redispatch.
2287 at::AutoDispatchSkipFunctionalize guard;
2288 at::Tensor tmp_output = at::_ops::arcsinh_out::call(self_, out_);
2289 return out;
2290 }
2291 } else {
2292 at::Tensor tmp_output;
2293 {
2294 at::AutoDispatchSkipFunctionalize guard;
2295 tmp_output = at::_ops::arcsinh::call(self_);
2296 }
2297 at::functionalization::impl::replace_(out, tmp_output);
2298 at::functionalization::impl::commit_update(out);
2299 at::functionalization::impl::sync(out);
2300 return out;
2301 }
2302 }
2303
2304 at::Tensor & arcsinh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
2305 if (true) {
2306 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
2307 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
2308 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2309 auto self_meta = to_meta(self);
2310 at::AutoDispatchSkipFunctionalize func_guard;
2311 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
2312 at::_ops::arcsinh_::call(self_meta);
2313 }
2314
2315 at::Tensor self_;
2316 if (at::functionalization::impl::isFunctionalTensor(self)) {
2317 at::functionalization::impl::sync(self);
2318 self_ = at::functionalization::impl::from_functional_tensor(self);
2319 } else {
2320 self_ = self;
2321 }
2322 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
2323 if ((false)) {
2324 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
2325 TORCH_INTERNAL_ASSERT(false,
2326 "mutating a non-functional tensor with a functional tensor is not allowed.",
2327 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2328 } else {
2329 // case 2: arguments are not functional tensors, so we no-op and redispatch.
2330 at::AutoDispatchSkipFunctionalize guard;
2331 at::Tensor tmp_output = at::_ops::arcsinh_::call(self_);
2332 return self;
2333 }
2334 } else {
2335 at::Tensor tmp_output;
2336 {
2337 at::AutoDispatchSkipFunctionalize guard;
2338 tmp_output = at::_ops::arcsinh::call(self_);
2339 }
2340 at::functionalization::impl::replace_(self, tmp_output);
2341 at::functionalization::impl::commit_update(self);
2342 at::functionalization::impl::sync(self);
2343 return self;
2344 }
2345 }
2346
2347 at::Tensor & atanh_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
2348 if (false) {
2349 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
2350 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
2351 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2352 auto self_meta = to_meta(self);
2353 auto out_meta = to_meta(out);
2354 at::AutoDispatchSkipFunctionalize func_guard;
2355 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
2356 at::_ops::atanh_out::call(self_meta, out_meta);
2357 }
2358
2359 at::Tensor self_;
2360 if (at::functionalization::impl::isFunctionalTensor(self)) {
2361 at::functionalization::impl::sync(self);
2362 self_ = at::functionalization::impl::from_functional_tensor(self);
2363 } else {
2364 self_ = self;
2365 }
2366
2367 at::Tensor out_;
2368 if (at::functionalization::impl::isFunctionalTensor(out)) {
2369 at::functionalization::impl::sync(out);
2370 out_ = at::functionalization::impl::from_functional_tensor(out);
2371 } else {
2372 out_ = out;
2373 }
2374 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
2375 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
2376 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
2377 TORCH_INTERNAL_ASSERT(false,
2378 "mutating a non-functional tensor with a functional tensor is not allowed.",
2379 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2380 } else {
2381 // case 2: arguments are not functional tensors, so we no-op and redispatch.
2382 at::AutoDispatchSkipFunctionalize guard;
2383 at::Tensor tmp_output = at::_ops::atanh_out::call(self_, out_);
2384 return out;
2385 }
2386 } else {
2387 at::Tensor tmp_output;
2388 {
2389 at::AutoDispatchSkipFunctionalize guard;
2390 tmp_output = at::_ops::atanh::call(self_);
2391 }
2392 at::functionalization::impl::replace_(out, tmp_output);
2393 at::functionalization::impl::commit_update(out);
2394 at::functionalization::impl::sync(out);
2395 return out;
2396 }
2397 }
2398
2399 at::Tensor & atanh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
2400 if (true) {
2401 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
2402 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
2403 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2404 auto self_meta = to_meta(self);
2405 at::AutoDispatchSkipFunctionalize func_guard;
2406 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
2407 at::_ops::atanh_::call(self_meta);
2408 }
2409
2410 at::Tensor self_;
2411 if (at::functionalization::impl::isFunctionalTensor(self)) {
2412 at::functionalization::impl::sync(self);
2413 self_ = at::functionalization::impl::from_functional_tensor(self);
2414 } else {
2415 self_ = self;
2416 }
2417 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
2418 if ((false)) {
2419 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
2420 TORCH_INTERNAL_ASSERT(false,
2421 "mutating a non-functional tensor with a functional tensor is not allowed.",
2422 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2423 } else {
2424 // case 2: arguments are not functional tensors, so we no-op and redispatch.
2425 at::AutoDispatchSkipFunctionalize guard;
2426 at::Tensor tmp_output = at::_ops::atanh_::call(self_);
2427 return self;
2428 }
2429 } else {
2430 at::Tensor tmp_output;
2431 {
2432 at::AutoDispatchSkipFunctionalize guard;
2433 tmp_output = at::_ops::atanh::call(self_);
2434 }
2435 at::functionalization::impl::replace_(self, tmp_output);
2436 at::functionalization::impl::commit_update(self);
2437 at::functionalization::impl::sync(self);
2438 return self;
2439 }
2440 }
2441
2442 at::Tensor & asin_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
2443 if (false) {
2444 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
2445 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
2446 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2447 auto self_meta = to_meta(self);
2448 auto out_meta = to_meta(out);
2449 at::AutoDispatchSkipFunctionalize func_guard;
2450 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
2451 at::_ops::asin_out::call(self_meta, out_meta);
2452 }
2453
2454 at::Tensor self_;
2455 if (at::functionalization::impl::isFunctionalTensor(self)) {
2456 at::functionalization::impl::sync(self);
2457 self_ = at::functionalization::impl::from_functional_tensor(self);
2458 } else {
2459 self_ = self;
2460 }
2461
2462 at::Tensor out_;
2463 if (at::functionalization::impl::isFunctionalTensor(out)) {
2464 at::functionalization::impl::sync(out);
2465 out_ = at::functionalization::impl::from_functional_tensor(out);
2466 } else {
2467 out_ = out;
2468 }
2469 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
2470 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
2471 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
2472 TORCH_INTERNAL_ASSERT(false,
2473 "mutating a non-functional tensor with a functional tensor is not allowed.",
2474 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2475 } else {
2476 // case 2: arguments are not functional tensors, so we no-op and redispatch.
2477 at::AutoDispatchSkipFunctionalize guard;
2478 at::Tensor tmp_output = at::_ops::asin_out::call(self_, out_);
2479 return out;
2480 }
2481 } else {
2482 at::Tensor tmp_output;
2483 {
2484 at::AutoDispatchSkipFunctionalize guard;
2485 tmp_output = at::_ops::asin::call(self_);
2486 }
2487 at::functionalization::impl::replace_(out, tmp_output);
2488 at::functionalization::impl::commit_update(out);
2489 at::functionalization::impl::sync(out);
2490 return out;
2491 }
2492 }
2493
2494 at::Tensor & asin_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
2495 if (true) {
2496 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
2497 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
2498 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2499 auto self_meta = to_meta(self);
2500 at::AutoDispatchSkipFunctionalize func_guard;
2501 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
2502 at::_ops::asin_::call(self_meta);
2503 }
2504
2505 at::Tensor self_;
2506 if (at::functionalization::impl::isFunctionalTensor(self)) {
2507 at::functionalization::impl::sync(self);
2508 self_ = at::functionalization::impl::from_functional_tensor(self);
2509 } else {
2510 self_ = self;
2511 }
2512 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
2513 if ((false)) {
2514 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
2515 TORCH_INTERNAL_ASSERT(false,
2516 "mutating a non-functional tensor with a functional tensor is not allowed.",
2517 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2518 } else {
2519 // case 2: arguments are not functional tensors, so we no-op and redispatch.
2520 at::AutoDispatchSkipFunctionalize guard;
2521 at::Tensor tmp_output = at::_ops::asin_::call(self_);
2522 return self;
2523 }
2524 } else {
2525 at::Tensor tmp_output;
2526 {
2527 at::AutoDispatchSkipFunctionalize guard;
2528 tmp_output = at::_ops::asin::call(self_);
2529 }
2530 at::functionalization::impl::replace_(self, tmp_output);
2531 at::functionalization::impl::commit_update(self);
2532 at::functionalization::impl::sync(self);
2533 return self;
2534 }
2535 }
2536
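// Ops with several tensor inputs, including optional ones such as `weight` here, unwrap
// each argument individually, and the "mutating a non-functional tensor" check inspects
// every tensor input rather than just `self`.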
2537 at::Tensor & binary_cross_entropy_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input) {
2538 if (false) {
2539 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
2540 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
2541 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2542 auto grad_output_meta = to_meta(grad_output);
2543 auto self_meta = to_meta(self);
2544 auto target_meta = to_meta(target);
2545 auto weight_meta = to_meta(weight);
2546 auto grad_input_meta = to_meta(grad_input);
2547 at::AutoDispatchSkipFunctionalize func_guard;
2548 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
2549 at::_ops::binary_cross_entropy_backward_grad_input::call(grad_output_meta, self_meta, target_meta, weight_meta, reduction, grad_input_meta);
2550 }
2551
2552 at::Tensor grad_output_;
2553 if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
2554 at::functionalization::impl::sync(grad_output);
2555 grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
2556 } else {
2557 grad_output_ = grad_output;
2558 }
2559
2560 at::Tensor self_;
2561 if (at::functionalization::impl::isFunctionalTensor(self)) {
2562 at::functionalization::impl::sync(self);
2563 self_ = at::functionalization::impl::from_functional_tensor(self);
2564 } else {
2565 self_ = self;
2566 }
2567
2568 at::Tensor target_;
2569 if (at::functionalization::impl::isFunctionalTensor(target)) {
2570 at::functionalization::impl::sync(target);
2571 target_ = at::functionalization::impl::from_functional_tensor(target);
2572 } else {
2573 target_ = target;
2574 }
2575
2576 c10::optional<at::Tensor> weight_;
2577 if (at::functionalization::impl::isFunctionalTensor(weight)) {
2578 at::functionalization::impl::sync(weight);
2579 weight_ = at::functionalization::impl::from_functional_tensor(weight);
2580 } else {
2581 weight_ = weight;
2582 }
2583
2584 at::Tensor grad_input_;
2585 if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
2586 at::functionalization::impl::sync(grad_input);
2587 grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
2588 } else {
2589 grad_input_ = grad_input;
2590 }
2591 if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
2592 if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target) || at::functionalization::impl::isFunctionalTensor(weight))) {
2593 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
2594 TORCH_INTERNAL_ASSERT(false,
2595 "mutating a non-functional tensor with a functional tensor is not allowed.",
2596 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2597 } else {
2598 // case 2: arguments are not functional tensors, so we no-op and redispatch.
2599 at::AutoDispatchSkipFunctionalize guard;
2600 at::Tensor tmp_output = at::_ops::binary_cross_entropy_backward_grad_input::call(grad_output_, self_, target_, weight_, reduction, grad_input_);
2601 return grad_input;
2602 }
2603 } else {
2604 at::Tensor tmp_output;
2605 {
2606 at::AutoDispatchSkipFunctionalize guard;
2607 tmp_output = at::_ops::binary_cross_entropy_backward::call(grad_output_, self_, target_, weight_, reduction);
2608 }
2609 at::functionalization::impl::replace_(grad_input, tmp_output);
2610 at::functionalization::impl::commit_update(grad_input);
2611 at::functionalization::impl::sync(grad_input);
2612 return grad_input;
2613 }
2614 }
2615
2616 at::Tensor & binary_cross_entropy_with_logits_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & pos_weight, int64_t reduction, at::Tensor & out) {
2617 if (false) {
2618 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
2619 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
2620 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
2621 auto self_meta = to_meta(self);
2622 auto target_meta = to_meta(target);
2623 auto weight_meta = to_meta(weight);
2624 auto pos_weight_meta = to_meta(pos_weight);
2625 auto out_meta = to_meta(out);
2626 at::AutoDispatchSkipFunctionalize func_guard;
2627 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
2628 at::_ops::binary_cross_entropy_with_logits_out::call(self_meta, target_meta, weight_meta, pos_weight_meta, reduction, out_meta);
2629 }
2630
2631 at::Tensor self_;
2632 if (at::functionalization::impl::isFunctionalTensor(self)) {
2633 at::functionalization::impl::sync(self);
2634 self_ = at::functionalization::impl::from_functional_tensor(self);
2635 } else {
2636 self_ = self;
2637 }
2638
2639 at::Tensor target_;
2640 if (at::functionalization::impl::isFunctionalTensor(target)) {
2641 at::functionalization::impl::sync(target);
2642 target_ = at::functionalization::impl::from_functional_tensor(target);
2643 } else {
2644 target_ = target;
2645 }
2646
2647 c10::optional<at::Tensor> weight_;
2648 if (at::functionalization::impl::isFunctionalTensor(weight)) {
2649 at::functionalization::impl::sync(weight);
2650 weight_ = at::functionalization::impl::from_functional_tensor(weight);
2651 } else {
2652 weight_ = weight;
2653 }
2654
2655 c10::optional<at::Tensor> pos_weight_;
2656 if (at::functionalization::impl::isFunctionalTensor(pos_weight)) {
2657 at::functionalization::impl::sync(pos_weight);
2658 pos_weight_ = at::functionalization::impl::from_functional_tensor(pos_weight);
2659 } else {
2660 pos_weight_ = pos_weight;
2661 }
2662
2663 at::Tensor out_;
2664 if (at::functionalization::impl::isFunctionalTensor(out)) {
2665 at::functionalization::impl::sync(out);
2666 out_ = at::functionalization::impl::from_functional_tensor(out);
2667 } else {
2668 out_ = out;
2669 }
2670 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
2671 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(pos_weight))) {
2672 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
2673 TORCH_INTERNAL_ASSERT(false,
2674 "mutating a non-functional tensor with a functional tensor is not allowed.",
2675 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2676 } else {
2677 // case 2: arguments are not functional tensors, so we no-op and redispatch.
2678 at::AutoDispatchSkipFunctionalize guard;
2679 at::Tensor tmp_output = at::_ops::binary_cross_entropy_with_logits_out::call(self_, target_, weight_, pos_weight_, reduction, out_);
2680 return out;
2681 }
2682 } else {
2683 at::Tensor tmp_output;
2684 {
2685 at::AutoDispatchSkipFunctionalize guard;
2686 tmp_output = at::_ops::binary_cross_entropy_with_logits::call(self_, target_, weight_, pos_weight_, reduction);
2687 }
2688 at::functionalization::impl::replace_(out, tmp_output);
2689 at::functionalization::impl::commit_update(out);
2690 at::functionalization::impl::sync(out);
2691 return out;
2692 }
2693 }
2694
2695 at::Tensor & logical_not_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
2696 if (false) {
2697 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
2698 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
2699 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
2700 auto self_meta = to_meta(self);
2701 auto out_meta = to_meta(out);
2702 at::AutoDispatchSkipFunctionalize func_guard;
2703 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
2704 at::_ops::logical_not_out::call(self_meta, out_meta);
2705 }
2706
2707 at::Tensor self_;
2708 if (at::functionalization::impl::isFunctionalTensor(self)) {
2709 at::functionalization::impl::sync(self);
2710 self_ = at::functionalization::impl::from_functional_tensor(self);
2711 } else {
2712 self_ = self;
2713 }
2714
2715 at::Tensor out_;
2716 if (at::functionalization::impl::isFunctionalTensor(out)) {
2717 at::functionalization::impl::sync(out);
2718 out_ = at::functionalization::impl::from_functional_tensor(out);
2719 } else {
2720 out_ = out;
2721 }
2722 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
2723 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
2724 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
2725 TORCH_INTERNAL_ASSERT(false,
2726 "mutating a non-functional tensor with a functional tensor is not allowed.",
2727 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2728 } else {
2729 // case 2: arguments are not functional tensors, so we no-op and redispatch.
2730 at::AutoDispatchSkipFunctionalize guard;
2731 at::Tensor tmp_output = at::_ops::logical_not_out::call(self_, out_);
2732 return out;
2733 }
2734 } else {
2735 at::Tensor tmp_output;
2736 {
2737 at::AutoDispatchSkipFunctionalize guard;
2738 tmp_output = at::_ops::logical_not::call(self_);
2739 }
2740 at::functionalization::impl::replace_(out, tmp_output);
2741 at::functionalization::impl::commit_update(out);
2742 at::functionalization::impl::sync(out);
2743 return out;
2744 }
2745 }
2746
2747 at::Tensor & logical_not_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
2748 if (true) {
2749 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
2750 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
2751 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
2752 auto self_meta = to_meta(self);
2753 at::AutoDispatchSkipFunctionalize func_guard;
2754 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
2755 at::_ops::logical_not_::call(self_meta);
2756 }
2757
2758 at::Tensor self_;
2759 if (at::functionalization::impl::isFunctionalTensor(self)) {
2760 at::functionalization::impl::sync(self);
2761 self_ = at::functionalization::impl::from_functional_tensor(self);
2762 } else {
2763 self_ = self;
2764 }
2765 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
2766 if ((false)) {
2767 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
2768 TORCH_INTERNAL_ASSERT(false,
2769 "mutating a non-functional tensor with a functional tensor is not allowed.",
2770 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2771 } else {
2772 // case 2: arguments are not functional tensors, so we no-op and redispatch.
2773 at::AutoDispatchSkipFunctionalize guard;
2774 at::Tensor tmp_output = at::_ops::logical_not_::call(self_);
2775 return self;
2776 }
2777 } else {
2778 at::Tensor tmp_output;
2779 {
2780 at::AutoDispatchSkipFunctionalize guard;
2781 tmp_output = at::_ops::logical_not::call(self_);
2782 }
2783 at::functionalization::impl::replace_(self, tmp_output);
2784 at::functionalization::impl::commit_update(self);
2785 at::functionalization::impl::sync(self);
2786 return self;
2787 }
2788 }
2789
2790 at::Tensor & logical_and_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
2791 if (false) {
2792 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
2793 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
2794 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
2795 auto self_meta = to_meta(self);
2796 auto other_meta = to_meta(other);
2797 auto out_meta = to_meta(out);
2798 at::AutoDispatchSkipFunctionalize func_guard;
2799 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
2800 at::_ops::logical_and_out::call(self_meta, other_meta, out_meta);
2801 }
2802
2803 at::Tensor self_;
2804 if (at::functionalization::impl::isFunctionalTensor(self)) {
2805 at::functionalization::impl::sync(self);
2806 self_ = at::functionalization::impl::from_functional_tensor(self);
2807 } else {
2808 self_ = self;
2809 }
2810
2811 at::Tensor other_;
2812 if (at::functionalization::impl::isFunctionalTensor(other)) {
2813 at::functionalization::impl::sync(other);
2814 other_ = at::functionalization::impl::from_functional_tensor(other);
2815 } else {
2816 other_ = other;
2817 }
2818
2819 at::Tensor out_;
2820 if (at::functionalization::impl::isFunctionalTensor(out)) {
2821 at::functionalization::impl::sync(out);
2822 out_ = at::functionalization::impl::from_functional_tensor(out);
2823 } else {
2824 out_ = out;
2825 }
2826 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
2827 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
2828 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
2829 TORCH_INTERNAL_ASSERT(false,
2830 "mutating a non-functional tensor with a functional tensor is not allowed.",
2831 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2832 } else {
2833 // case 2: arguments are not functional tensors, so we no-op and redispatch.
2834 at::AutoDispatchSkipFunctionalize guard;
2835 at::Tensor tmp_output = at::_ops::logical_and_out::call(self_, other_, out_);
2836 return out;
2837 }
2838 } else {
2839 at::Tensor tmp_output;
2840 {
2841 at::AutoDispatchSkipFunctionalize guard;
2842 tmp_output = at::_ops::logical_and::call(self_, other_);
2843 }
2844 at::functionalization::impl::replace_(out, tmp_output);
2845 at::functionalization::impl::commit_update(out);
2846 at::functionalization::impl::sync(out);
2847 return out;
2848 }
2849 }
2850
2851 at::Tensor & logical_and_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
2852 if (true) {
2853 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
2854 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
2855 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
2856 auto self_meta = to_meta(self);
2857 auto other_meta = to_meta(other);
2858 at::AutoDispatchSkipFunctionalize func_guard;
2859 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
2860 at::_ops::logical_and_::call(self_meta, other_meta);
2861 }
2862
2863 at::Tensor self_;
2864 if (at::functionalization::impl::isFunctionalTensor(self)) {
2865 at::functionalization::impl::sync(self);
2866 self_ = at::functionalization::impl::from_functional_tensor(self);
2867 } else {
2868 self_ = self;
2869 }
2870
2871 at::Tensor other_;
2872 if (at::functionalization::impl::isFunctionalTensor(other)) {
2873 at::functionalization::impl::sync(other);
2874 other_ = at::functionalization::impl::from_functional_tensor(other);
2875 } else {
2876 other_ = other;
2877 }
2878 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
2879 if ((false || at::functionalization::impl::isFunctionalTensor(other))) {
2880 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
2881 TORCH_INTERNAL_ASSERT(false,
2882 "mutating a non-functional tensor with a functional tensor is not allowed.",
2883 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2884 } else {
2885 // case 2: arguments are not functional tensors, so we no-op and redispatch.
2886 at::AutoDispatchSkipFunctionalize guard;
2887 at::Tensor tmp_output = at::_ops::logical_and_::call(self_, other_);
2888 return self;
2889 }
2890 } else {
2891 at::Tensor tmp_output;
2892 {
2893 at::AutoDispatchSkipFunctionalize guard;
2894 tmp_output = at::_ops::logical_and::call(self_, other_);
2895 }
2896 at::functionalization::impl::replace_(self, tmp_output);
2897 at::functionalization::impl::commit_update(self);
2898 at::functionalization::impl::sync(self);
2899 return self;
2900 }
2901 }
2902
2903 at::Tensor & concatenate_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim, at::Tensor & out) {
2904 if (false) {
2905 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
2906 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
2907 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
2908 auto tensors_meta = to_meta(tensors);
2909 auto out_meta = to_meta(out);
2910 at::AutoDispatchSkipFunctionalize func_guard;
2911 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
2912 at::_ops::concatenate_out::call(tensors_meta, dim, out_meta);
2913 }
2914
2915 ::std::vector<at::Tensor> tensors_;
2916 if (at::functionalization::impl::isFunctionalTensor(tensors)) {
2917 at::functionalization::impl::sync(tensors);
2918 tensors_ = at::functionalization::impl::from_functional_tensor(tensors);
2919 } else {
2920 tensors_ = tensors.vec();
2921 }
2922
2923 at::Tensor out_;
2924 if (at::functionalization::impl::isFunctionalTensor(out)) {
2925 at::functionalization::impl::sync(out);
2926 out_ = at::functionalization::impl::from_functional_tensor(out);
2927 } else {
2928 out_ = out;
2929 }
2930 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
2931 if ((false || at::functionalization::impl::isFunctionalTensor(tensors))) {
2932 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
2933 TORCH_INTERNAL_ASSERT(false,
2934 "mutating a non-functional tensor with a functional tensor is not allowed.",
2935 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2936 } else {
2937 // case 2: arguments are not functional tensors, so we no-op and redispatch.
2938 at::AutoDispatchSkipFunctionalize guard;
2939 at::Tensor tmp_output = at::_ops::concatenate_out::call(tensors_, dim, out_);
2940 return out;
2941 }
2942 } else {
2943 at::Tensor tmp_output;
2944 {
2945 at::AutoDispatchSkipFunctionalize guard;
2946 tmp_output = at::_ops::concatenate::call(tensors_, dim);
2947 }
2948 at::functionalization::impl::replace_(out, tmp_output);
2949 at::functionalization::impl::commit_update(out);
2950 at::functionalization::impl::sync(out);
2951 return out;
2952 }
2953 }
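
// For TensorList arguments (concatenate, block_diag, chain_matmul below), the
// unwrapping step produces a ::std::vector<at::Tensor>: from_functional_tensor()
// when the list is functional, otherwise a plain copy via .vec() on the incoming list.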
2954
2955 at::Tensor & concatenate_out_names_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim, at::Tensor & out) {
2956 if (false) {
2957 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
2958 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
2959 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
2960 auto tensors_meta = to_meta(tensors);
2961 auto out_meta = to_meta(out);
2962 at::AutoDispatchSkipFunctionalize func_guard;
2963 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
2964 at::_ops::concatenate_names_out::call(tensors_meta, dim, out_meta);
2965 }
2966
2967 ::std::vector<at::Tensor> tensors_;
2968 if (at::functionalization::impl::isFunctionalTensor(tensors)) {
2969 at::functionalization::impl::sync(tensors);
2970 tensors_ = at::functionalization::impl::from_functional_tensor(tensors);
2971 } else {
2972 tensors_ = tensors.vec();
2973 }
2974
2975 at::Tensor out_;
2976 if (at::functionalization::impl::isFunctionalTensor(out)) {
2977 at::functionalization::impl::sync(out);
2978 out_ = at::functionalization::impl::from_functional_tensor(out);
2979 } else {
2980 out_ = out;
2981 }
2982 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
2983 if ((false || at::functionalization::impl::isFunctionalTensor(tensors))) {
2984 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
2985 TORCH_INTERNAL_ASSERT(false,
2986 "mutating a non-functional tensor with a functional tensor is not allowed.",
2987 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2988 } else {
2989 // case 2: arguments are not functional tensors, so we no-op and redispatch.
2990 at::AutoDispatchSkipFunctionalize guard;
2991 at::Tensor tmp_output = at::_ops::concatenate_names_out::call(tensors_, dim, out_);
2992 return out;
2993 }
2994 } else {
2995 at::Tensor tmp_output;
2996 {
2997 at::AutoDispatchSkipFunctionalize guard;
2998 tmp_output = at::_ops::concatenate_names::call(tensors_, dim);
2999 }
3000 at::functionalization::impl::replace_(out, tmp_output);
3001 at::functionalization::impl::commit_update(out);
3002 at::functionalization::impl::sync(out);
3003 return out;
3004 }
3005 }
3006
3007 at::Tensor & block_diag_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) {
3008 if (false) {
3009 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
3010 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
3011 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
3012 auto tensors_meta = to_meta(tensors);
3013 auto out_meta = to_meta(out);
3014 at::AutoDispatchSkipFunctionalize func_guard;
3015 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
3016 at::_ops::block_diag_out::call(tensors_meta, out_meta);
3017 }
3018
3019 ::std::vector<at::Tensor> tensors_;
3020 if (at::functionalization::impl::isFunctionalTensor(tensors)) {
3021 at::functionalization::impl::sync(tensors);
3022 tensors_ = at::functionalization::impl::from_functional_tensor(tensors);
3023 } else {
3024 tensors_ = tensors.vec();
3025 }
3026
3027 at::Tensor out_;
3028 if (at::functionalization::impl::isFunctionalTensor(out)) {
3029 at::functionalization::impl::sync(out);
3030 out_ = at::functionalization::impl::from_functional_tensor(out);
3031 } else {
3032 out_ = out;
3033 }
3034 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
3035 if ((false || at::functionalization::impl::isFunctionalTensor(tensors))) {
3036 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
3037 TORCH_INTERNAL_ASSERT(false,
3038 "mutating a non-functional tensor with a functional tensor is not allowed.",
3039 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3040 } else {
3041 // case 2: arguments are not functional tensors, so we no-op and redispatch.
3042 at::AutoDispatchSkipFunctionalize guard;
3043 at::Tensor tmp_output = at::_ops::block_diag_out::call(tensors_, out_);
3044 return out;
3045 }
3046 } else {
3047 at::Tensor tmp_output;
3048 {
3049 at::AutoDispatchSkipFunctionalize guard;
3050 tmp_output = at::_ops::block_diag::call(tensors_);
3051 }
3052 at::functionalization::impl::replace_(out, tmp_output);
3053 at::functionalization::impl::commit_update(out);
3054 at::functionalization::impl::sync(out);
3055 return out;
3056 }
3057 }
3058
3059 at::Tensor & chain_matmul_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList matrices, at::Tensor & out) {
3060 if (false) {
3061 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
3062 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
3063 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
3064 auto matrices_meta = to_meta(matrices);
3065 auto out_meta = to_meta(out);
3066 at::AutoDispatchSkipFunctionalize func_guard;
3067 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
3068 at::_ops::chain_matmul_out::call(matrices_meta, out_meta);
3069 }
3070
3071 ::std::vector<at::Tensor> matrices_;
3072 if (at::functionalization::impl::isFunctionalTensor(matrices)) {
3073 at::functionalization::impl::sync(matrices);
3074 matrices_ = at::functionalization::impl::from_functional_tensor(matrices);
3075 } else {
3076 matrices_ = matrices.vec();
3077 }
3078
3079 at::Tensor out_;
3080 if (at::functionalization::impl::isFunctionalTensor(out)) {
3081 at::functionalization::impl::sync(out);
3082 out_ = at::functionalization::impl::from_functional_tensor(out);
3083 } else {
3084 out_ = out;
3085 }
3086 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
3087 if ((false || at::functionalization::impl::isFunctionalTensor(matrices))) {
3088 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
3089 TORCH_INTERNAL_ASSERT(false,
3090 "mutating a non-functional tensor with a functional tensor is not allowed.",
3091 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3092 } else {
3093 // case 2: arguments are not functional tensors, so we no-op and redispatch.
3094 at::AutoDispatchSkipFunctionalize guard;
3095 at::Tensor tmp_output = at::_ops::chain_matmul_out::call(matrices_, out_);
3096 return out;
3097 }
3098 } else {
3099 at::Tensor tmp_output;
3100 {
3101 at::AutoDispatchSkipFunctionalize guard;
3102 tmp_output = at::_ops::chain_matmul::call(matrices_);
3103 }
3104 at::functionalization::impl::replace_(out, tmp_output);
3105 at::functionalization::impl::commit_update(out);
3106 at::functionalization::impl::sync(out);
3107 return out;
3108 }
3109 }
3110
3111 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
3112 if (false) {
3113 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
3114 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
3115 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
3116 auto grad_output_meta = to_meta(grad_output);
3117 auto input_meta = to_meta(input);
3118 auto weight_meta = to_meta(weight);
3119 auto out0_meta = to_meta(out0);
3120 auto out1_meta = to_meta(out1);
3121 auto out2_meta = to_meta(out2);
3122 at::AutoDispatchSkipFunctionalize func_guard;
3123 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
3124 at::_ops::convolution_backward_out::call(grad_output_meta, input_meta, weight_meta, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0_meta, out1_meta, out2_meta);
3125 }
3126
3127 at::Tensor grad_output_;
3128 if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
3129 at::functionalization::impl::sync(grad_output);
3130 grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
3131 } else {
3132 grad_output_ = grad_output;
3133 }
3134
3135 at::Tensor input_;
3136 if (at::functionalization::impl::isFunctionalTensor(input)) {
3137 at::functionalization::impl::sync(input);
3138 input_ = at::functionalization::impl::from_functional_tensor(input);
3139 } else {
3140 input_ = input;
3141 }
3142
3143 at::Tensor weight_;
3144 if (at::functionalization::impl::isFunctionalTensor(weight)) {
3145 at::functionalization::impl::sync(weight);
3146 weight_ = at::functionalization::impl::from_functional_tensor(weight);
3147 } else {
3148 weight_ = weight;
3149 }
3150
3151 at::Tensor out0_;
3152 if (at::functionalization::impl::isFunctionalTensor(out0)) {
3153 at::functionalization::impl::sync(out0);
3154 out0_ = at::functionalization::impl::from_functional_tensor(out0);
3155 } else {
3156 out0_ = out0;
3157 }
3158
3159 at::Tensor out1_;
3160 if (at::functionalization::impl::isFunctionalTensor(out1)) {
3161 at::functionalization::impl::sync(out1);
3162 out1_ = at::functionalization::impl::from_functional_tensor(out1);
3163 } else {
3164 out1_ = out1;
3165 }
3166
3167 at::Tensor out2_;
3168 if (at::functionalization::impl::isFunctionalTensor(out2)) {
3169 at::functionalization::impl::sync(out2);
3170 out2_ = at::functionalization::impl::from_functional_tensor(out2);
3171 } else {
3172 out2_ = out2;
3173 }
3174 if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
3175 if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight))) {
3176 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
3177 TORCH_INTERNAL_ASSERT(false,
3178 "mutating a non-functional tensor with a functional tensor is not allowed.",
3179 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3180 } else {
3181 // case 2: arguments are not functional tensors, so we no-op and redispatch.
3182 at::AutoDispatchSkipFunctionalize guard;
3183 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::convolution_backward_out::call(grad_output_, input_, weight_, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0_, out1_, out2_);
3184 return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
3185 }
3186 } else {
3187 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
3188 {
3189 at::AutoDispatchSkipFunctionalize guard;
3190 tmp_output = at::_ops::convolution_backward::call(grad_output_, input_, weight_, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask);
3191 }
3192 at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
3193 at::functionalization::impl::commit_update(out0);
3194 at::functionalization::impl::sync(out0);
3195 at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
3196 at::functionalization::impl::commit_update(out1);
3197 at::functionalization::impl::sync(out1);
3198 at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
3199 at::functionalization::impl::commit_update(out2);
3200 at::functionalization::impl::sync(out2);
3201 return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
3202 }
3203 }
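
// Multi-output out= overloads such as convolution_backward_out follow the same
// pattern, except that the functional variant returns a tuple and each output
// (out0, out1, out2) gets its own replace_() / commit_update() / sync() sequence
// before the tuple of tensor references is returned.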
3204
3205 at::Tensor & _copy_from_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & dst, bool non_blocking, at::Tensor & out) {
3206 if (false) {
3207 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
3208 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
3209 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
3210 auto self_meta = to_meta(self);
3211 auto dst_meta = to_meta(dst);
3212 auto out_meta = to_meta(out);
3213 at::AutoDispatchSkipFunctionalize func_guard;
3214 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
3215 at::_ops::_copy_from_out::call(self_meta, dst_meta, non_blocking, out_meta);
3216 }
3217
3218 at::Tensor self_;
3219 if (at::functionalization::impl::isFunctionalTensor(self)) {
3220 at::functionalization::impl::sync(self);
3221 self_ = at::functionalization::impl::from_functional_tensor(self);
3222 } else {
3223 self_ = self;
3224 }
3225
3226 at::Tensor dst_;
3227 if (at::functionalization::impl::isFunctionalTensor(dst)) {
3228 at::functionalization::impl::sync(dst);
3229 dst_ = at::functionalization::impl::from_functional_tensor(dst);
3230 } else {
3231 dst_ = dst;
3232 }
3233
3234 at::Tensor out_;
3235 if (at::functionalization::impl::isFunctionalTensor(out)) {
3236 at::functionalization::impl::sync(out);
3237 out_ = at::functionalization::impl::from_functional_tensor(out);
3238 } else {
3239 out_ = out;
3240 }
3241 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
3242 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(dst))) {
3243 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
3244 TORCH_INTERNAL_ASSERT(false,
3245 "mutating a non-functional tensor with a functional tensor is not allowed.",
3246 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3247 } else {
3248 // case 2: arguments are not functional tensors, so we no-op and redispatch.
3249 at::AutoDispatchSkipFunctionalize guard;
3250 at::Tensor tmp_output = at::_ops::_copy_from_out::call(self_, dst_, non_blocking, out_);
3251 return out;
3252 }
3253 } else {
3254 at::Tensor tmp_output;
3255 {
3256 at::AutoDispatchSkipFunctionalize guard;
3257 tmp_output = at::_ops::_copy_from::call(self_, dst_, non_blocking);
3258 }
3259 at::functionalization::impl::replace_(out, tmp_output);
3260 at::functionalization::impl::commit_update(out);
3261 at::functionalization::impl::sync(out);
3262 return out;
3263 }
3264 }
3265
3266 at::Tensor & cosh_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
3267 if (false) {
3268 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
3269 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
3270 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
3271 auto self_meta = to_meta(self);
3272 auto out_meta = to_meta(out);
3273 at::AutoDispatchSkipFunctionalize func_guard;
3274 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
3275 at::_ops::cosh_out::call(self_meta, out_meta);
3276 }
3277
3278 at::Tensor self_;
3279 if (at::functionalization::impl::isFunctionalTensor(self)) {
3280 at::functionalization::impl::sync(self);
3281 self_ = at::functionalization::impl::from_functional_tensor(self);
3282 } else {
3283 self_ = self;
3284 }
3285
3286 at::Tensor out_;
3287 if (at::functionalization::impl::isFunctionalTensor(out)) {
3288 at::functionalization::impl::sync(out);
3289 out_ = at::functionalization::impl::from_functional_tensor(out);
3290 } else {
3291 out_ = out;
3292 }
3293 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
3294 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
3295 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
3296 TORCH_INTERNAL_ASSERT(false,
3297 "mutating a non-functional tensor with a functional tensor is not allowed.",
3298 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3299 } else {
3300 // case 2: arguments are not functional tensors, so we no-op and redispatch.
3301 at::AutoDispatchSkipFunctionalize guard;
3302 at::Tensor tmp_output = at::_ops::cosh_out::call(self_, out_);
3303 return out;
3304 }
3305 } else {
3306 at::Tensor tmp_output;
3307 {
3308 at::AutoDispatchSkipFunctionalize guard;
3309 tmp_output = at::_ops::cosh::call(self_);
3310 }
3311 at::functionalization::impl::replace_(out, tmp_output);
3312 at::functionalization::impl::commit_update(out);
3313 at::functionalization::impl::sync(out);
3314 return out;
3315 }
3316 }
3317
3318 at::Tensor & cosh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
3319 if (true) {
3320 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
3321 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
3322 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
3323 auto self_meta = to_meta(self);
3324 at::AutoDispatchSkipFunctionalize func_guard;
3325 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
3326 at::_ops::cosh_::call(self_meta);
3327 }
3328
3329 at::Tensor self_;
3330 if (at::functionalization::impl::isFunctionalTensor(self)) {
3331 at::functionalization::impl::sync(self);
3332 self_ = at::functionalization::impl::from_functional_tensor(self);
3333 } else {
3334 self_ = self;
3335 }
3336 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
3337 if ((false)) {
3338 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
3339 TORCH_INTERNAL_ASSERT(false,
3340 "mutating a non-functional tensor with a functional tensor is not allowed.",
3341 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3342 } else {
3343 // case 2: arguments are not functional tensors, so we no-op and redispatch.
3344 at::AutoDispatchSkipFunctionalize guard;
3345 at::Tensor tmp_output = at::_ops::cosh_::call(self_);
3346 return self;
3347 }
3348 } else {
3349 at::Tensor tmp_output;
3350 {
3351 at::AutoDispatchSkipFunctionalize guard;
3352 tmp_output = at::_ops::cosh::call(self_);
3353 }
3354 at::functionalization::impl::replace_(self, tmp_output);
3355 at::functionalization::impl::commit_update(self);
3356 at::functionalization::impl::sync(self);
3357 return self;
3358 }
3359 }
3360
3361 at::Tensor & cudnn_convolution_transpose_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out) {
3362 if (false) {
3363 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
3364 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
3365 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
3366 auto self_meta = to_meta(self);
3367 auto weight_meta = to_meta(weight);
3368 auto out_meta = to_meta(out);
3369 at::AutoDispatchSkipFunctionalize func_guard;
3370 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
3371 at::_ops::cudnn_convolution_transpose_out::call(self_meta, weight_meta, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out_meta);
3372 }
3373
3374 at::Tensor self_;
3375 if (at::functionalization::impl::isFunctionalTensor(self)) {
3376 at::functionalization::impl::sync(self);
3377 self_ = at::functionalization::impl::from_functional_tensor(self);
3378 } else {
3379 self_ = self;
3380 }
3381
3382 at::Tensor weight_;
3383 if (at::functionalization::impl::isFunctionalTensor(weight)) {
3384 at::functionalization::impl::sync(weight);
3385 weight_ = at::functionalization::impl::from_functional_tensor(weight);
3386 } else {
3387 weight_ = weight;
3388 }
3389
3390 at::Tensor out_;
3391 if (at::functionalization::impl::isFunctionalTensor(out)) {
3392 at::functionalization::impl::sync(out);
3393 out_ = at::functionalization::impl::from_functional_tensor(out);
3394 } else {
3395 out_ = out;
3396 }
3397 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
3398 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight))) {
3399 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
3400 TORCH_INTERNAL_ASSERT(false,
3401 "mutating a non-functional tensor with a functional tensor is not allowed.",
3402 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3403 } else {
3404 // case 2: arguments are not functional tensors, so we no-op and redispatch.
3405 at::AutoDispatchSkipFunctionalize guard;
3406 at::Tensor tmp_output = at::_ops::cudnn_convolution_transpose_out::call(self_, weight_, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out_);
3407 return out;
3408 }
3409 } else {
3410 at::Tensor tmp_output;
3411 {
3412 at::AutoDispatchSkipFunctionalize guard;
3413 tmp_output = at::_ops::cudnn_convolution_transpose::call(self_, weight_, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
3414 }
3415 at::functionalization::impl::replace_(out, tmp_output);
3416 at::functionalization::impl::commit_update(out);
3417 at::functionalization::impl::sync(out);
3418 return out;
3419 }
3420 }
3421
3422 at::Tensor & _mps_convolution_transpose_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
3423 if (false) {
3424 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
3425 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
3426 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
3427 auto self_meta = to_meta(self);
3428 auto weight_meta = to_meta(weight);
3429 auto out_meta = to_meta(out);
3430 at::AutoDispatchSkipFunctionalize func_guard;
3431 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
3432 at::_ops::_mps_convolution_transpose_out::call(self_meta, weight_meta, padding, output_padding, stride, dilation, groups, out_meta);
3433 }
3434
3435 at::Tensor self_;
3436 if (at::functionalization::impl::isFunctionalTensor(self)) {
3437 at::functionalization::impl::sync(self);
3438 self_ = at::functionalization::impl::from_functional_tensor(self);
3439 } else {
3440 self_ = self;
3441 }
3442
3443 at::Tensor weight_;
3444 if (at::functionalization::impl::isFunctionalTensor(weight)) {
3445 at::functionalization::impl::sync(weight);
3446 weight_ = at::functionalization::impl::from_functional_tensor(weight);
3447 } else {
3448 weight_ = weight;
3449 }
3450
3451 at::Tensor out_;
3452 if (at::functionalization::impl::isFunctionalTensor(out)) {
3453 at::functionalization::impl::sync(out);
3454 out_ = at::functionalization::impl::from_functional_tensor(out);
3455 } else {
3456 out_ = out;
3457 }
3458 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
3459 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight))) {
3460 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
3461 TORCH_INTERNAL_ASSERT(false,
3462 "mutating a non-functional tensor with a functional tensor is not allowed.",
3463 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3464 } else {
3465 // case 2: arguments are not functional tensors, so we no-op and redispatch.
3466 at::AutoDispatchSkipFunctionalize guard;
3467 at::Tensor tmp_output = at::_ops::_mps_convolution_transpose_out::call(self_, weight_, padding, output_padding, stride, dilation, groups, out_);
3468 return out;
3469 }
3470 } else {
3471 at::Tensor tmp_output;
3472 {
3473 at::AutoDispatchSkipFunctionalize guard;
3474 tmp_output = at::_ops::_mps_convolution_transpose::call(self_, weight_, padding, output_padding, stride, dilation, groups);
3475 }
3476 at::functionalization::impl::replace_(out, tmp_output);
3477 at::functionalization::impl::commit_update(out);
3478 at::functionalization::impl::sync(out);
3479 return out;
3480 }
3481 }
3482
3483 at::Tensor & cudnn_grid_sampler_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grid, at::Tensor & out) {
3484 if (false) {
3485 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
3486 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
3487 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
3488 auto self_meta = to_meta(self);
3489 auto grid_meta = to_meta(grid);
3490 auto out_meta = to_meta(out);
3491 at::AutoDispatchSkipFunctionalize func_guard;
3492 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
3493 at::_ops::cudnn_grid_sampler_out::call(self_meta, grid_meta, out_meta);
3494 }
3495
3496 at::Tensor self_;
3497 if (at::functionalization::impl::isFunctionalTensor(self)) {
3498 at::functionalization::impl::sync(self);
3499 self_ = at::functionalization::impl::from_functional_tensor(self);
3500 } else {
3501 self_ = self;
3502 }
3503
3504 at::Tensor grid_;
3505 if (at::functionalization::impl::isFunctionalTensor(grid)) {
3506 at::functionalization::impl::sync(grid);
3507 grid_ = at::functionalization::impl::from_functional_tensor(grid);
3508 } else {
3509 grid_ = grid;
3510 }
3511
3512 at::Tensor out_;
3513 if (at::functionalization::impl::isFunctionalTensor(out)) {
3514 at::functionalization::impl::sync(out);
3515 out_ = at::functionalization::impl::from_functional_tensor(out);
3516 } else {
3517 out_ = out;
3518 }
3519 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
3520 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(grid))) {
3521 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
3522 TORCH_INTERNAL_ASSERT(false,
3523 "mutating a non-functional tensor with a functional tensor is not allowed.",
3524 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3525 } else {
3526 // case 2: arguments are not functional tensors, so we no-op and redispatch.
3527 at::AutoDispatchSkipFunctionalize guard;
3528 at::Tensor tmp_output = at::_ops::cudnn_grid_sampler_out::call(self_, grid_, out_);
3529 return out;
3530 }
3531 } else {
3532 at::Tensor tmp_output;
3533 {
3534 at::AutoDispatchSkipFunctionalize guard;
3535 tmp_output = at::_ops::cudnn_grid_sampler::call(self_, grid_);
3536 }
3537 at::functionalization::impl::replace_(out, tmp_output);
3538 at::functionalization::impl::commit_update(out);
3539 at::functionalization::impl::sync(out);
3540 return out;
3541 }
3542 }
3543
3544 at::Tensor & cumprod_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
3545 if (false) {
3546 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
3547 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
3548 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
3549 auto self_meta = to_meta(self);
3550 auto out_meta = to_meta(out);
3551 at::AutoDispatchSkipFunctionalize func_guard;
3552 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
3553 at::_ops::cumprod_out::call(self_meta, dim, dtype, out_meta);
3554 }
3555
3556 at::Tensor self_;
3557 if (at::functionalization::impl::isFunctionalTensor(self)) {
3558 at::functionalization::impl::sync(self);
3559 self_ = at::functionalization::impl::from_functional_tensor(self);
3560 } else {
3561 self_ = self;
3562 }
3563
3564 at::Tensor out_;
3565 if (at::functionalization::impl::isFunctionalTensor(out)) {
3566 at::functionalization::impl::sync(out);
3567 out_ = at::functionalization::impl::from_functional_tensor(out);
3568 } else {
3569 out_ = out;
3570 }
3571 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
3572 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
3573 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
3574 TORCH_INTERNAL_ASSERT(false,
3575 "mutating a non-functional tensor with a functional tensor is not allowed.",
3576 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3577 } else {
3578 // case 2: arguments are not functional tensors, so we no-op and redispatch.
3579 at::AutoDispatchSkipFunctionalize guard;
3580 at::Tensor tmp_output = at::_ops::cumprod_out::call(self_, dim, dtype, out_);
3581 return out;
3582 }
3583 } else {
3584 at::Tensor tmp_output;
3585 {
3586 at::AutoDispatchSkipFunctionalize guard;
3587 tmp_output = at::_ops::cumprod::call(self_, dim, dtype);
3588 }
3589 at::functionalization::impl::replace_(out, tmp_output);
3590 at::functionalization::impl::commit_update(out);
3591 at::functionalization::impl::sync(out);
3592 return out;
3593 }
3594 }
3595
3596 at::Tensor & cumprod_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
3597 if (true) {
3598 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
3599 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
3600 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
3601 auto self_meta = to_meta(self);
3602 at::AutoDispatchSkipFunctionalize func_guard;
3603 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
3604 at::_ops::cumprod_::call(self_meta, dim, dtype);
3605 }
3606
3607 at::Tensor self_;
3608 if (at::functionalization::impl::isFunctionalTensor(self)) {
3609 at::functionalization::impl::sync(self);
3610 self_ = at::functionalization::impl::from_functional_tensor(self);
3611 } else {
3612 self_ = self;
3613 }
3614 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
3615 if ((false)) {
3616 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
3617 TORCH_INTERNAL_ASSERT(false,
3618 "mutating a non-functional tensor with a functional tensor is not allowed.",
3619 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3620 } else {
3621 // case 2: arguments are not functional tensors, so we no-op and redispatch.
3622 at::AutoDispatchSkipFunctionalize guard;
3623 at::Tensor tmp_output = at::_ops::cumprod_::call(self_, dim, dtype);
3624 return self;
3625 }
3626 } else {
3627 at::Tensor tmp_output;
3628 {
3629 at::AutoDispatchSkipFunctionalize guard;
3630 tmp_output = at::_ops::cumprod::call(self_, dim, dtype);
3631 }
3632 at::functionalization::impl::replace_(self, tmp_output);
3633 at::functionalization::impl::commit_update(self);
3634 at::functionalization::impl::sync(self);
3635 return self;
3636 }
3637 }
3638
3639 at::Tensor & cumprod_out_dimname_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
3640 if (false) {
3641 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
3642 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
3643 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
3644 auto self_meta = to_meta(self);
3645 auto out_meta = to_meta(out);
3646 at::AutoDispatchSkipFunctionalize func_guard;
3647 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
3648 at::_ops::cumprod_dimname_out::call(self_meta, dim, dtype, out_meta);
3649 }
3650
3651 at::Tensor self_;
3652 if (at::functionalization::impl::isFunctionalTensor(self)) {
3653 at::functionalization::impl::sync(self);
3654 self_ = at::functionalization::impl::from_functional_tensor(self);
3655 } else {
3656 self_ = self;
3657 }
3658
3659 at::Tensor out_;
3660 if (at::functionalization::impl::isFunctionalTensor(out)) {
3661 at::functionalization::impl::sync(out);
3662 out_ = at::functionalization::impl::from_functional_tensor(out);
3663 } else {
3664 out_ = out;
3665 }
3666 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
3667 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
3668 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
3669 TORCH_INTERNAL_ASSERT(false,
3670 "mutating a non-functional tensor with a functional tensor is not allowed.",
3671 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3672 } else {
3673 // case 2: arguments are not functional tensors, so we no-op and redispatch.
3674 at::AutoDispatchSkipFunctionalize guard;
3675 at::Tensor tmp_output = at::_ops::cumprod_dimname_out::call(self_, dim, dtype, out_);
3676 return out;
3677 }
3678 } else {
3679 at::Tensor tmp_output;
3680 {
3681 at::AutoDispatchSkipFunctionalize guard;
3682 tmp_output = at::_ops::cumprod_dimname::call(self_, dim, dtype);
3683 }
3684 at::functionalization::impl::replace_(out, tmp_output);
3685 at::functionalization::impl::commit_update(out);
3686 at::functionalization::impl::sync(out);
3687 return out;
3688 }
3689 }
3690
3691 at::Tensor & cumprod__dimname(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
3692 if (true) {
3693 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
3694 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
3695 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
3696 auto self_meta = to_meta(self);
3697 at::AutoDispatchSkipFunctionalize func_guard;
3698 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
3699 at::_ops::cumprod__dimname::call(self_meta, dim, dtype);
3700 }
3701
3702 at::Tensor self_;
3703 if (at::functionalization::impl::isFunctionalTensor(self)) {
3704 at::functionalization::impl::sync(self);
3705 self_ = at::functionalization::impl::from_functional_tensor(self);
3706 } else {
3707 self_ = self;
3708 }
3709 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
3710 if ((false)) {
3711 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
3712 TORCH_INTERNAL_ASSERT(false,
3713 "mutating a non-functional tensor with a functional tensor is not allowed.",
3714 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3715 } else {
3716 // case 2: arguments are not functional tensors, so we no-op and redispatch.
3717 at::AutoDispatchSkipFunctionalize guard;
3718 at::Tensor tmp_output = at::_ops::cumprod__dimname::call(self_, dim, dtype);
3719 return self;
3720 }
3721 } else {
3722 at::Tensor tmp_output;
3723 {
3724 at::AutoDispatchSkipFunctionalize guard;
3725 tmp_output = at::_ops::cumprod_dimname::call(self_, dim, dtype);
3726 }
3727 at::functionalization::impl::replace_(self, tmp_output);
3728 at::functionalization::impl::commit_update(self);
3729 at::functionalization::impl::sync(self);
3730 return self;
3731 }
3732 }
3733
3734 at::Tensor & diag_embed_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
3735 if (false) {
3736 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
3737 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
3738 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
3739 auto self_meta = to_meta(self);
3740 auto out_meta = to_meta(out);
3741 at::AutoDispatchSkipFunctionalize func_guard;
3742 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
3743 at::_ops::diag_embed_out::call(self_meta, offset, dim1, dim2, out_meta);
3744 }
3745
3746 at::Tensor self_;
3747 if (at::functionalization::impl::isFunctionalTensor(self)) {
3748 at::functionalization::impl::sync(self);
3749 self_ = at::functionalization::impl::from_functional_tensor(self);
3750 } else {
3751 self_ = self;
3752 }
3753
3754 at::Tensor out_;
3755 if (at::functionalization::impl::isFunctionalTensor(out)) {
3756 at::functionalization::impl::sync(out);
3757 out_ = at::functionalization::impl::from_functional_tensor(out);
3758 } else {
3759 out_ = out;
3760 }
3761 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
3762 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
3763 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
3764 TORCH_INTERNAL_ASSERT(false,
3765 "mutating a non-functional tensor with a functional tensor is not allowed.",
3766 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3767 } else {
3768 // case 2: arguments are not functional tensors, so we no-op and redispatch.
3769 at::AutoDispatchSkipFunctionalize guard;
3770 at::Tensor tmp_output = at::_ops::diag_embed_out::call(self_, offset, dim1, dim2, out_);
3771 return out;
3772 }
3773 } else {
3774 at::Tensor tmp_output;
3775 {
3776 at::AutoDispatchSkipFunctionalize guard;
3777 tmp_output = at::_ops::diag_embed::call(self_, offset, dim1, dim2);
3778 }
3779 at::functionalization::impl::replace_(out, tmp_output);
3780 at::functionalization::impl::commit_update(out);
3781 at::functionalization::impl::sync(out);
3782 return out;
3783 }
3784 }
3785
3786 at::Tensor & diagonal_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
3787 if (false) {
3788 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
3789 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
3790 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
3791 auto grad_output_meta = to_meta(grad_output);
3792 auto out_meta = to_meta(out);
3793 at::AutoDispatchSkipFunctionalize func_guard;
3794 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
3795 at::_ops::diagonal_backward_out::call(grad_output_meta, input_sizes, offset, dim1, dim2, out_meta);
3796 }
3797
3798 at::Tensor grad_output_;
3799 if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
3800 at::functionalization::impl::sync(grad_output);
3801 grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
3802 } else {
3803 grad_output_ = grad_output;
3804 }
3805
3806 at::Tensor out_;
3807 if (at::functionalization::impl::isFunctionalTensor(out)) {
3808 at::functionalization::impl::sync(out);
3809 out_ = at::functionalization::impl::from_functional_tensor(out);
3810 } else {
3811 out_ = out;
3812 }
3813 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
3814 if ((false || at::functionalization::impl::isFunctionalTensor(grad_output))) {
3815 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
3816 TORCH_INTERNAL_ASSERT(false,
3817 "mutating a non-functional tensor with a functional tensor is not allowed.",
3818 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3819 } else {
3820 // case 2: arguments are not functional tensors, so we no-op and redispatch.
3821 at::AutoDispatchSkipFunctionalize guard;
3822 at::Tensor tmp_output = at::_ops::diagonal_backward_out::call(grad_output_, input_sizes, offset, dim1, dim2, out_);
3823 return out;
3824 }
3825 } else {
3826 at::Tensor tmp_output;
3827 {
3828 at::AutoDispatchSkipFunctionalize guard;
3829 tmp_output = at::_ops::diagonal_backward::call(grad_output_, input_sizes, offset, dim1, dim2);
3830 }
3831 at::functionalization::impl::replace_(out, tmp_output);
3832 at::functionalization::impl::commit_update(out);
3833 at::functionalization::impl::sync(out);
3834 return out;
3835 }
3836 }
3837
3838 at::Tensor & div_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
3839 if (false) {
3840 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
3841 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
3842 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
3843 auto self_meta = to_meta(self);
3844 auto other_meta = to_meta(other);
3845 auto out_meta = to_meta(out);
3846 at::AutoDispatchSkipFunctionalize func_guard;
3847 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
3848 at::_ops::div_out::call(self_meta, other_meta, out_meta);
3849 }
3850
3851 at::Tensor self_;
3852 if (at::functionalization::impl::isFunctionalTensor(self)) {
3853 at::functionalization::impl::sync(self);
3854 self_ = at::functionalization::impl::from_functional_tensor(self);
3855 } else {
3856 self_ = self;
3857 }
3858
3859 at::Tensor other_;
3860 if (at::functionalization::impl::isFunctionalTensor(other)) {
3861 at::functionalization::impl::sync(other);
3862 other_ = at::functionalization::impl::from_functional_tensor(other);
3863 } else {
3864 other_ = other;
3865 }
3866
3867 at::Tensor out_;
3868 if (at::functionalization::impl::isFunctionalTensor(out)) {
3869 at::functionalization::impl::sync(out);
3870 out_ = at::functionalization::impl::from_functional_tensor(out);
3871 } else {
3872 out_ = out;
3873 }
3874 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
3875 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
3876 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
3877 TORCH_INTERNAL_ASSERT(false,
3878 "mutating a non-functional tensor with a functional tensor is not allowed.",
3879 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3880 } else {
3881 // case 2: arguments are not functional tensors, so we no-op and redispatch.
3882 at::AutoDispatchSkipFunctionalize guard;
3883 at::Tensor tmp_output = at::_ops::div_out::call(self_, other_, out_);
3884 return out;
3885 }
3886 } else {
3887 at::Tensor tmp_output;
3888 {
3889 at::AutoDispatchSkipFunctionalize guard;
3890 tmp_output = at::_ops::div_Tensor::call(self_, other_);
3891 }
3892 at::functionalization::impl::replace_(out, tmp_output);
3893 at::functionalization::impl::commit_update(out);
3894 at::functionalization::impl::sync(out);
3895 return out;
3896 }
3897 }
3898
3899 at::Tensor & div__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
3900 if (true) {
3901 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
3902 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
3903 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
3904 auto self_meta = to_meta(self);
3905 auto other_meta = to_meta(other);
3906 at::AutoDispatchSkipFunctionalize func_guard;
3907 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
3908 at::_ops::div__Tensor::call(self_meta, other_meta);
3909 }
3910
3911 at::Tensor self_;
3912 if (at::functionalization::impl::isFunctionalTensor(self)) {
3913 at::functionalization::impl::sync(self);
3914 self_ = at::functionalization::impl::from_functional_tensor(self);
3915 } else {
3916 self_ = self;
3917 }
3918
3919 at::Tensor other_;
3920 if (at::functionalization::impl::isFunctionalTensor(other)) {
3921 at::functionalization::impl::sync(other);
3922 other_ = at::functionalization::impl::from_functional_tensor(other);
3923 } else {
3924 other_ = other;
3925 }
3926 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
3927 if ((false || at::functionalization::impl::isFunctionalTensor(other))) {
3928 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
3929 TORCH_INTERNAL_ASSERT(false,
3930 "mutating a non-functional tensor with a functional tensor is not allowed.",
3931 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3932 } else {
3933 // case 2: arguments are not functional tensors, so we no-op and redispatch.
3934 at::AutoDispatchSkipFunctionalize guard;
3935 at::Tensor tmp_output = at::_ops::div__Tensor::call(self_, other_);
3936 return self;
3937 }
3938 } else {
3939 at::Tensor tmp_output;
3940 {
3941 at::AutoDispatchSkipFunctionalize guard;
3942 tmp_output = at::_ops::div_Tensor::call(self_, other_);
3943 }
3944 at::functionalization::impl::replace_(self, tmp_output);
3945 at::functionalization::impl::commit_update(self);
3946 at::functionalization::impl::sync(self);
3947 return self;
3948 }
3949 }
3950
3951 at::Tensor & div_out_out_mode(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) {
3952 if (false) {
3953 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
3954 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
3955 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
3956 auto self_meta = to_meta(self);
3957 auto other_meta = to_meta(other);
3958 auto out_meta = to_meta(out);
3959 at::AutoDispatchSkipFunctionalize func_guard;
3960 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
3961 at::_ops::div_out_mode::call(self_meta, other_meta, rounding_mode, out_meta);
3962 }
3963
3964 at::Tensor self_;
3965 if (at::functionalization::impl::isFunctionalTensor(self)) {
3966 at::functionalization::impl::sync(self);
3967 self_ = at::functionalization::impl::from_functional_tensor(self);
3968 } else {
3969 self_ = self;
3970 }
3971
3972 at::Tensor other_;
3973 if (at::functionalization::impl::isFunctionalTensor(other)) {
3974 at::functionalization::impl::sync(other);
3975 other_ = at::functionalization::impl::from_functional_tensor(other);
3976 } else {
3977 other_ = other;
3978 }
3979
3980 at::Tensor out_;
3981 if (at::functionalization::impl::isFunctionalTensor(out)) {
3982 at::functionalization::impl::sync(out);
3983 out_ = at::functionalization::impl::from_functional_tensor(out);
3984 } else {
3985 out_ = out;
3986 }
3987 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
3988 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
3989 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
3990 TORCH_INTERNAL_ASSERT(false,
3991 "mutating a non-functional tensor with a functional tensor is not allowed.",
3992 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
3993 } else {
3994 // case 2: arguments are not functional tensors, so we no-op and redispatch.
3995 at::AutoDispatchSkipFunctionalize guard;
3996 at::Tensor tmp_output = at::_ops::div_out_mode::call(self_, other_, rounding_mode, out_);
3997 return out;
3998 }
3999 } else {
4000 at::Tensor tmp_output;
4001 {
4002 at::AutoDispatchSkipFunctionalize guard;
4003 tmp_output = at::_ops::div_Tensor_mode::call(self_, other_, rounding_mode);
4004 }
4005 at::functionalization::impl::replace_(out, tmp_output);
4006 at::functionalization::impl::commit_update(out);
4007 at::functionalization::impl::sync(out);
4008 return out;
4009 }
4010 }
4011
4012 at::Tensor & div__Tensor_mode(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
4013 if (true) {
4014 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
4015 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
4016 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
4017 auto self_meta = to_meta(self);
4018 auto other_meta = to_meta(other);
4019 at::AutoDispatchSkipFunctionalize func_guard;
4020 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
4021 at::_ops::div__Tensor_mode::call(self_meta, other_meta, rounding_mode);
4022 }
4023
4024 at::Tensor self_;
4025 if (at::functionalization::impl::isFunctionalTensor(self)) {
4026 at::functionalization::impl::sync(self);
4027 self_ = at::functionalization::impl::from_functional_tensor(self);
4028 } else {
4029 self_ = self;
4030 }
4031
4032 at::Tensor other_;
4033 if (at::functionalization::impl::isFunctionalTensor(other)) {
4034 at::functionalization::impl::sync(other);
4035 other_ = at::functionalization::impl::from_functional_tensor(other);
4036 } else {
4037 other_ = other;
4038 }
4039 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
4040 if ((false || at::functionalization::impl::isFunctionalTensor(other))) {
4041 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
4042 TORCH_INTERNAL_ASSERT(false,
4043 "mutating a non-functional tensor with a functional tensor is not allowed.",
4044 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4045 } else {
4046 // case 2: arguments are not functional tensors, so we no-op and redispatch.
4047 at::AutoDispatchSkipFunctionalize guard;
4048 at::Tensor tmp_output = at::_ops::div__Tensor_mode::call(self_, other_, rounding_mode);
4049 return self;
4050 }
4051 } else {
4052 at::Tensor tmp_output;
4053 {
4054 at::AutoDispatchSkipFunctionalize guard;
4055 tmp_output = at::_ops::div_Tensor_mode::call(self_, other_, rounding_mode);
4056 }
4057 at::functionalization::impl::replace_(self, tmp_output);
4058 at::functionalization::impl::commit_update(self);
4059 at::functionalization::impl::sync(self);
4060 return self;
4061 }
4062 }
4063
4064 at::Tensor & div_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
4065 if (false) {
4066 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
4067 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
4068 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
4069 auto self_meta = to_meta(self);
4070 auto out_meta = to_meta(out);
4071 at::AutoDispatchSkipFunctionalize func_guard;
4072 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
4073 at::_ops::div_Scalar_out::call(self_meta, other, out_meta);
4074 }
4075
4076 at::Tensor self_;
4077 if (at::functionalization::impl::isFunctionalTensor(self)) {
4078 at::functionalization::impl::sync(self);
4079 self_ = at::functionalization::impl::from_functional_tensor(self);
4080 } else {
4081 self_ = self;
4082 }
4083
4084 at::Tensor out_;
4085 if (at::functionalization::impl::isFunctionalTensor(out)) {
4086 at::functionalization::impl::sync(out);
4087 out_ = at::functionalization::impl::from_functional_tensor(out);
4088 } else {
4089 out_ = out;
4090 }
4091 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
4092 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
4093 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
4094 TORCH_INTERNAL_ASSERT(false,
4095 "mutating a non-functional tensor with a functional tensor is not allowed.",
4096 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4097 } else {
4098 // case 2: arguments are not functional tensors, so we no-op and redispatch.
4099 at::AutoDispatchSkipFunctionalize guard;
4100 at::Tensor tmp_output = at::_ops::div_Scalar_out::call(self_, other, out_);
4101 return out;
4102 }
4103 } else {
4104 at::Tensor tmp_output;
4105 {
4106 at::AutoDispatchSkipFunctionalize guard;
4107 tmp_output = at::_ops::div_Scalar::call(self_, other);
4108 }
4109 at::functionalization::impl::replace_(out, tmp_output);
4110 at::functionalization::impl::commit_update(out);
4111 at::functionalization::impl::sync(out);
4112 return out;
4113 }
4114 }
4115
4116 at::Tensor & div__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
4117 if (true) {
4118 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
4119 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
4120 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
4121 auto self_meta = to_meta(self);
4122 at::AutoDispatchSkipFunctionalize func_guard;
4123 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
4124 at::_ops::div__Scalar::call(self_meta, other);
4125 }
4126
4127 at::Tensor self_;
4128 if (at::functionalization::impl::isFunctionalTensor(self)) {
4129 at::functionalization::impl::sync(self);
4130 self_ = at::functionalization::impl::from_functional_tensor(self);
4131 } else {
4132 self_ = self;
4133 }
4134 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
4135 if ((false)) {
4136 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
4137 TORCH_INTERNAL_ASSERT(false,
4138 "mutating a non-functional tensor with a functional tensor is not allowed.",
4139 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4140 } else {
4141 // case 2: arguments are not functional tensors, so we no-op and redispatch.
4142 at::AutoDispatchSkipFunctionalize guard;
4143 at::Tensor tmp_output = at::_ops::div__Scalar::call(self_, other);
4144 return self;
4145 }
4146 } else {
4147 at::Tensor tmp_output;
4148 {
4149 at::AutoDispatchSkipFunctionalize guard;
4150 tmp_output = at::_ops::div_Scalar::call(self_, other);
4151 }
4152 at::functionalization::impl::replace_(self, tmp_output);
4153 at::functionalization::impl::commit_update(self);
4154 at::functionalization::impl::sync(self);
4155 return self;
4156 }
4157 }
4158
4159 at::Tensor & div_out_Scalar_mode_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) {
4160 if (false) {
4161 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
4162 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
4163 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
4164 auto self_meta = to_meta(self);
4165 auto out_meta = to_meta(out);
4166 at::AutoDispatchSkipFunctionalize func_guard;
4167 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
4168 at::_ops::div_Scalar_mode_out::call(self_meta, other, rounding_mode, out_meta);
4169 }
4170
4171 at::Tensor self_;
4172 if (at::functionalization::impl::isFunctionalTensor(self)) {
4173 at::functionalization::impl::sync(self);
4174 self_ = at::functionalization::impl::from_functional_tensor(self);
4175 } else {
4176 self_ = self;
4177 }
4178
4179 at::Tensor out_;
4180 if (at::functionalization::impl::isFunctionalTensor(out)) {
4181 at::functionalization::impl::sync(out);
4182 out_ = at::functionalization::impl::from_functional_tensor(out);
4183 } else {
4184 out_ = out;
4185 }
4186 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
4187 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
4188 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
4189 TORCH_INTERNAL_ASSERT(false,
4190 "mutating a non-functional tensor with a functional tensor is not allowed.",
4191 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4192 } else {
4193 // case 2: arguments are not functional tensors, so we no-op and redispatch.
4194 at::AutoDispatchSkipFunctionalize guard;
4195 at::Tensor tmp_output = at::_ops::div_Scalar_mode_out::call(self_, other, rounding_mode, out_);
4196 return out;
4197 }
4198 } else {
4199 at::Tensor tmp_output;
4200 {
4201 at::AutoDispatchSkipFunctionalize guard;
4202 tmp_output = at::_ops::div_Scalar_mode::call(self_, other, rounding_mode);
4203 }
4204 at::functionalization::impl::replace_(out, tmp_output);
4205 at::functionalization::impl::commit_update(out);
4206 at::functionalization::impl::sync(out);
4207 return out;
4208 }
4209 }
4210
4211 at::Tensor & div__Scalar_mode(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
4212 if (true) {
4213 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
4214 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
4215 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
4216 auto self_meta = to_meta(self);
4217 at::AutoDispatchSkipFunctionalize func_guard;
4218 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
4219 at::_ops::div__Scalar_mode::call(self_meta, other, rounding_mode);
4220 }
4221
4222 at::Tensor self_;
4223 if (at::functionalization::impl::isFunctionalTensor(self)) {
4224 at::functionalization::impl::sync(self);
4225 self_ = at::functionalization::impl::from_functional_tensor(self);
4226 } else {
4227 self_ = self;
4228 }
4229 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
4230 if ((false)) {
4231 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
4232 TORCH_INTERNAL_ASSERT(false,
4233 "mutating a non-functional tensor with a functional tensor is not allowed.",
4234 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4235 } else {
4236 // case 2: arguments are not functional tensors, so we no-op and redispatch.
4237 at::AutoDispatchSkipFunctionalize guard;
4238 at::Tensor tmp_output = at::_ops::div__Scalar_mode::call(self_, other, rounding_mode);
4239 return self;
4240 }
4241 } else {
4242 at::Tensor tmp_output;
4243 {
4244 at::AutoDispatchSkipFunctionalize guard;
4245 tmp_output = at::_ops::div_Scalar_mode::call(self_, other, rounding_mode);
4246 }
4247 at::functionalization::impl::replace_(self, tmp_output);
4248 at::functionalization::impl::commit_update(self);
4249 at::functionalization::impl::sync(self);
4250 return self;
4251 }
4252 }
4253
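// Multi-output out= ops such as _embedding_bag.out follow the same pattern,
// except that *all* of the out arguments must be functional tensors for the
// functionalized path to be taken, and each element of the functional op's
// result tuple is committed into the corresponding out tensor via
// std::get<i>(tmp_output) followed by replace_/commit_update/sync.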
4254 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _embedding_bag_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
4255 if (false) {
4256 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
4257 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
4258 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
4259 auto weight_meta = to_meta(weight);
4260 auto indices_meta = to_meta(indices);
4261 auto offsets_meta = to_meta(offsets);
4262 auto per_sample_weights_meta = to_meta(per_sample_weights);
4263 auto out0_meta = to_meta(out0);
4264 auto out1_meta = to_meta(out1);
4265 auto out2_meta = to_meta(out2);
4266 auto out3_meta = to_meta(out3);
4267 at::AutoDispatchSkipFunctionalize func_guard;
4268 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
4269 at::_ops::_embedding_bag_out::call(weight_meta, indices_meta, offsets_meta, scale_grad_by_freq, mode, sparse, per_sample_weights_meta, include_last_offset, padding_idx, out0_meta, out1_meta, out2_meta, out3_meta);
4270 }
4271
4272 at::Tensor weight_;
4273 if (at::functionalization::impl::isFunctionalTensor(weight)) {
4274 at::functionalization::impl::sync(weight);
4275 weight_ = at::functionalization::impl::from_functional_tensor(weight);
4276 } else {
4277 weight_ = weight;
4278 }
4279
4280 at::Tensor indices_;
4281 if (at::functionalization::impl::isFunctionalTensor(indices)) {
4282 at::functionalization::impl::sync(indices);
4283 indices_ = at::functionalization::impl::from_functional_tensor(indices);
4284 } else {
4285 indices_ = indices;
4286 }
4287
4288 at::Tensor offsets_;
4289 if (at::functionalization::impl::isFunctionalTensor(offsets)) {
4290 at::functionalization::impl::sync(offsets);
4291 offsets_ = at::functionalization::impl::from_functional_tensor(offsets);
4292 } else {
4293 offsets_ = offsets;
4294 }
4295
4296 c10::optional<at::Tensor> per_sample_weights_;
4297 if (at::functionalization::impl::isFunctionalTensor(per_sample_weights)) {
4298 at::functionalization::impl::sync(per_sample_weights);
4299 per_sample_weights_ = at::functionalization::impl::from_functional_tensor(per_sample_weights);
4300 } else {
4301 per_sample_weights_ = per_sample_weights;
4302 }
4303
4304 at::Tensor out0_;
4305 if (at::functionalization::impl::isFunctionalTensor(out0)) {
4306 at::functionalization::impl::sync(out0);
4307 out0_ = at::functionalization::impl::from_functional_tensor(out0);
4308 } else {
4309 out0_ = out0;
4310 }
4311
4312 at::Tensor out1_;
4313 if (at::functionalization::impl::isFunctionalTensor(out1)) {
4314 at::functionalization::impl::sync(out1);
4315 out1_ = at::functionalization::impl::from_functional_tensor(out1);
4316 } else {
4317 out1_ = out1;
4318 }
4319
4320 at::Tensor out2_;
4321 if (at::functionalization::impl::isFunctionalTensor(out2)) {
4322 at::functionalization::impl::sync(out2);
4323 out2_ = at::functionalization::impl::from_functional_tensor(out2);
4324 } else {
4325 out2_ = out2;
4326 }
4327
4328 at::Tensor out3_;
4329 if (at::functionalization::impl::isFunctionalTensor(out3)) {
4330 at::functionalization::impl::sync(out3);
4331 out3_ = at::functionalization::impl::from_functional_tensor(out3);
4332 } else {
4333 out3_ = out3;
4334 }
4335 if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2) && at::functionalization::impl::isFunctionalTensor(out3))) {
4336 if ((false || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(indices) || at::functionalization::impl::isFunctionalTensor(offsets) || at::functionalization::impl::isFunctionalTensor(per_sample_weights))) {
4337 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
4338 TORCH_INTERNAL_ASSERT(false,
4339 "mutating a non-functional tensor with a functional tensor is not allowed.",
4340 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4341 } else {
4342 // case 2: arguments are not functional tensors, so we no-op and redispatch.
4343 at::AutoDispatchSkipFunctionalize guard;
4344 ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_embedding_bag_out::call(weight_, indices_, offsets_, scale_grad_by_freq, mode, sparse, per_sample_weights_, include_last_offset, padding_idx, out0_, out1_, out2_, out3_);
4345 return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3);
4346 }
4347 } else {
4348 ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output;
4349 {
4350 at::AutoDispatchSkipFunctionalize guard;
4351 tmp_output = at::_ops::_embedding_bag::call(weight_, indices_, offsets_, scale_grad_by_freq, mode, sparse, per_sample_weights_, include_last_offset, padding_idx);
4352 }
4353 at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
4354 at::functionalization::impl::commit_update(out0);
4355 at::functionalization::impl::sync(out0);
4356 at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
4357 at::functionalization::impl::commit_update(out1);
4358 at::functionalization::impl::sync(out1);
4359 at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
4360 at::functionalization::impl::commit_update(out2);
4361 at::functionalization::impl::sync(out2);
4362 at::functionalization::impl::replace_(out3, std::get<3>(tmp_output));
4363 at::functionalization::impl::commit_update(out3);
4364 at::functionalization::impl::sync(out3);
4365 return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3);
4366 }
4367 }
4368
4369 at::Tensor & _embedding_bag_per_sample_weights_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx, at::Tensor & out) {
4370 if (false) {
4371 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
4372 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
4373 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
4374 auto grad_meta = to_meta(grad);
4375 auto weight_meta = to_meta(weight);
4376 auto indices_meta = to_meta(indices);
4377 auto offsets_meta = to_meta(offsets);
4378 auto offset2bag_meta = to_meta(offset2bag);
4379 auto out_meta = to_meta(out);
4380 at::AutoDispatchSkipFunctionalize func_guard;
4381 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
4382 at::_ops::_embedding_bag_per_sample_weights_backward_out::call(grad_meta, weight_meta, indices_meta, offsets_meta, offset2bag_meta, mode, padding_idx, out_meta);
4383 }
4384
4385 at::Tensor grad_;
4386 if (at::functionalization::impl::isFunctionalTensor(grad)) {
4387 at::functionalization::impl::sync(grad);
4388 grad_ = at::functionalization::impl::from_functional_tensor(grad);
4389 } else {
4390 grad_ = grad;
4391 }
4392
4393 at::Tensor weight_;
4394 if (at::functionalization::impl::isFunctionalTensor(weight)) {
4395 at::functionalization::impl::sync(weight);
4396 weight_ = at::functionalization::impl::from_functional_tensor(weight);
4397 } else {
4398 weight_ = weight;
4399 }
4400
4401 at::Tensor indices_;
4402 if (at::functionalization::impl::isFunctionalTensor(indices)) {
4403 at::functionalization::impl::sync(indices);
4404 indices_ = at::functionalization::impl::from_functional_tensor(indices);
4405 } else {
4406 indices_ = indices;
4407 }
4408
4409 at::Tensor offsets_;
4410 if (at::functionalization::impl::isFunctionalTensor(offsets)) {
4411 at::functionalization::impl::sync(offsets);
4412 offsets_ = at::functionalization::impl::from_functional_tensor(offsets);
4413 } else {
4414 offsets_ = offsets;
4415 }
4416
4417 at::Tensor offset2bag_;
4418 if (at::functionalization::impl::isFunctionalTensor(offset2bag)) {
4419 at::functionalization::impl::sync(offset2bag);
4420 offset2bag_ = at::functionalization::impl::from_functional_tensor(offset2bag);
4421 } else {
4422 offset2bag_ = offset2bag;
4423 }
4424
4425 at::Tensor out_;
4426 if (at::functionalization::impl::isFunctionalTensor(out)) {
4427 at::functionalization::impl::sync(out);
4428 out_ = at::functionalization::impl::from_functional_tensor(out);
4429 } else {
4430 out_ = out;
4431 }
4432 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
4433 if ((false || at::functionalization::impl::isFunctionalTensor(grad) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(indices) || at::functionalization::impl::isFunctionalTensor(offsets) || at::functionalization::impl::isFunctionalTensor(offset2bag))) {
4434 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
4435 TORCH_INTERNAL_ASSERT(false,
4436 "mutating a non-functional tensor with a functional tensor is not allowed.",
4437 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4438 } else {
4439 // case 2: arguments are not functional tensors, so we no-op and redispatch.
4440 at::AutoDispatchSkipFunctionalize guard;
4441 at::Tensor tmp_output = at::_ops::_embedding_bag_per_sample_weights_backward_out::call(grad_, weight_, indices_, offsets_, offset2bag_, mode, padding_idx, out_);
4442 return out;
4443 }
4444 } else {
4445 at::Tensor tmp_output;
4446 {
4447 at::AutoDispatchSkipFunctionalize guard;
4448 tmp_output = at::_ops::_embedding_bag_per_sample_weights_backward::call(grad_, weight_, indices_, offsets_, offset2bag_, mode, padding_idx);
4449 }
4450 at::functionalization::impl::replace_(out, tmp_output);
4451 at::functionalization::impl::commit_update(out);
4452 at::functionalization::impl::sync(out);
4453 return out;
4454 }
4455 }
4456
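// For factory-style out= ops (new_full.out here, and empty_quantized.out and
// empty_strided.out further below) the functional variant takes TensorOptions
// instead of an `out` argument, so the generated code reconstructs them from
// the existing out tensor (out_.scalar_type(), out_.layout(), out_.device())
// and passes c10::nullopt for pin_memory.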
4457 at::Tensor & new_full_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
4458 if (false) {
4459 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
4460 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
4461 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
4462 auto self_meta = to_meta(self);
4463 auto out_meta = to_meta(out);
4464 at::AutoDispatchSkipFunctionalize func_guard;
4465 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
4466 at::_ops::new_full_out::call(self_meta, size, fill_value, out_meta);
4467 }
4468
4469 at::Tensor self_;
4470 if (at::functionalization::impl::isFunctionalTensor(self)) {
4471 at::functionalization::impl::sync(self);
4472 self_ = at::functionalization::impl::from_functional_tensor(self);
4473 } else {
4474 self_ = self;
4475 }
4476
4477 at::Tensor out_;
4478 if (at::functionalization::impl::isFunctionalTensor(out)) {
4479 at::functionalization::impl::sync(out);
4480 out_ = at::functionalization::impl::from_functional_tensor(out);
4481 } else {
4482 out_ = out;
4483 }
4484 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
4485 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
4486 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
4487 TORCH_INTERNAL_ASSERT(false,
4488 "mutating a non-functional tensor with a functional tensor is not allowed.",
4489 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4490 } else {
4491 // case 2: arguments are not functional tensors, so we no-op and redispatch.
4492 at::AutoDispatchSkipFunctionalize guard;
4493 at::Tensor tmp_output = at::_ops::new_full_out::call(self_, size, fill_value, out_);
4494 return out;
4495 }
4496 } else {
4497 at::Tensor tmp_output;
4498 {
4499 at::AutoDispatchSkipFunctionalize guard;
4500 tmp_output = at::_ops::new_full::call(self_, size, fill_value, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt);
4501 }
4502 at::functionalization::impl::replace_(out, tmp_output);
4503 at::functionalization::impl::commit_update(out);
4504 at::functionalization::impl::sync(out);
4505 return out;
4506 }
4507 }
4508
4509 at::Tensor & empty_quantized_out_out(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Tensor & qtensor, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
4510 if (false) {
4511 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
4512 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
4513 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
4514 auto qtensor_meta = to_meta(qtensor);
4515 auto out_meta = to_meta(out);
4516 at::AutoDispatchSkipFunctionalize func_guard;
4517 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
4518 at::_ops::empty_quantized_out::call(size, qtensor_meta, memory_format, out_meta);
4519 }
4520
4521 at::Tensor qtensor_;
4522 if (at::functionalization::impl::isFunctionalTensor(qtensor)) {
4523 at::functionalization::impl::sync(qtensor);
4524 qtensor_ = at::functionalization::impl::from_functional_tensor(qtensor);
4525 } else {
4526 qtensor_ = qtensor;
4527 }
4528
4529 at::Tensor out_;
4530 if (at::functionalization::impl::isFunctionalTensor(out)) {
4531 at::functionalization::impl::sync(out);
4532 out_ = at::functionalization::impl::from_functional_tensor(out);
4533 } else {
4534 out_ = out;
4535 }
4536 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
4537 if ((false || at::functionalization::impl::isFunctionalTensor(qtensor))) {
4538 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
4539 TORCH_INTERNAL_ASSERT(false,
4540 "mutating a non-functional tensor with a functional tensor is not allowed.",
4541 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4542 } else {
4543 // case 2: arguments are not functional tensors, so we no-op and redispatch.
4544 at::AutoDispatchSkipFunctionalize guard;
4545 at::Tensor tmp_output = at::_ops::empty_quantized_out::call(size, qtensor_, memory_format, out_);
4546 return out;
4547 }
4548 } else {
4549 at::Tensor tmp_output;
4550 {
4551 at::AutoDispatchSkipFunctionalize guard;
4552 tmp_output = at::_ops::empty_quantized::call(size, qtensor_, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt, memory_format);
4553 }
4554 at::functionalization::impl::replace_(out, tmp_output);
4555 at::functionalization::impl::commit_update(out);
4556 at::functionalization::impl::sync(out);
4557 return out;
4558 }
4559 }
4560
4561 at::Tensor & empty_strided_out_out(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
4562 if (false) {
4563 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
4564 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
4565 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
4566 auto out_meta = to_meta(out);
4567 at::AutoDispatchSkipFunctionalize func_guard;
4568 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
4569 at::_ops::empty_strided_out::call(size, stride, out_meta);
4570 }
4571
4572 at::Tensor out_;
4573 if (at::functionalization::impl::isFunctionalTensor(out)) {
4574 at::functionalization::impl::sync(out);
4575 out_ = at::functionalization::impl::from_functional_tensor(out);
4576 } else {
4577 out_ = out;
4578 }
4579 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
4580 if ((false)) {
4581 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
4582 TORCH_INTERNAL_ASSERT(false,
4583 "mutating a non-functional tensor with a functional tensor is not allowed.",
4584 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4585 } else {
4586 // case 2: arguments are not functional tensors, so we no-op and redispatch.
4587 at::AutoDispatchSkipFunctionalize guard;
4588 at::Tensor tmp_output = at::_ops::empty_strided_out::call(size, stride, out_);
4589 return out;
4590 }
4591 } else {
4592 at::Tensor tmp_output;
4593 {
4594 at::AutoDispatchSkipFunctionalize guard;
4595 tmp_output = at::_ops::empty_strided::call(size, stride, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt);
4596 }
4597 at::functionalization::impl::replace_(out, tmp_output);
4598 at::functionalization::impl::commit_update(out);
4599 at::functionalization::impl::sync(out);
4600 return out;
4601 }
4602 }
4603
4604 at::Tensor & exp_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
4605 if (false) {
4606 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
4607 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
4608 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
4609 auto self_meta = to_meta(self);
4610 auto out_meta = to_meta(out);
4611 at::AutoDispatchSkipFunctionalize func_guard;
4612 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
4613 at::_ops::exp_out::call(self_meta, out_meta);
4614 }
4615
4616 at::Tensor self_;
4617 if (at::functionalization::impl::isFunctionalTensor(self)) {
4618 at::functionalization::impl::sync(self);
4619 self_ = at::functionalization::impl::from_functional_tensor(self);
4620 } else {
4621 self_ = self;
4622 }
4623
4624 at::Tensor out_;
4625 if (at::functionalization::impl::isFunctionalTensor(out)) {
4626 at::functionalization::impl::sync(out);
4627 out_ = at::functionalization::impl::from_functional_tensor(out);
4628 } else {
4629 out_ = out;
4630 }
4631 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
4632 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
4633 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
4634 TORCH_INTERNAL_ASSERT(false,
4635 "mutating a non-functional tensor with a functional tensor is not allowed.",
4636 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4637 } else {
4638 // case 2: arguments are not functional tensors, so we no-op and redispatch.
4639 at::AutoDispatchSkipFunctionalize guard;
4640 at::Tensor tmp_output = at::_ops::exp_out::call(self_, out_);
4641 return out;
4642 }
4643 } else {
4644 at::Tensor tmp_output;
4645 {
4646 at::AutoDispatchSkipFunctionalize guard;
4647 tmp_output = at::_ops::exp::call(self_);
4648 }
4649 at::functionalization::impl::replace_(out, tmp_output);
4650 at::functionalization::impl::commit_update(out);
4651 at::functionalization::impl::sync(out);
4652 return out;
4653 }
4654 }
4655
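// As a rough usage sketch (an assumption about how the Functionalize key is
// typically exercised, not code taken from this file): a caller wraps a tensor
// in a FunctionalTensorWrapper and enables the Functionalize dispatch key,
// after which an in-place call like exp_() is routed through the wrapper below
// and recorded as a functional exp():
//
//   at::Tensor t = at::randn({2, 2});
//   at::Tensor f = at::functionalization::impl::to_functional_tensor(t);
//   c10::impl::IncludeDispatchKeyGuard guard(c10::DispatchKey::Functionalize);
//   f.exp_();  // no real mutation of t; the exp is recorded functionally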
4656 at::Tensor & exp_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
4657 if (true) {
4658 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
4659 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
4660 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
4661 auto self_meta = to_meta(self);
4662 at::AutoDispatchSkipFunctionalize func_guard;
4663 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
4664 at::_ops::exp_::call(self_meta);
4665 }
4666
4667 at::Tensor self_;
4668 if (at::functionalization::impl::isFunctionalTensor(self)) {
4669 at::functionalization::impl::sync(self);
4670 self_ = at::functionalization::impl::from_functional_tensor(self);
4671 } else {
4672 self_ = self;
4673 }
4674 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
4675 if ((false)) {
4676 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
4677 TORCH_INTERNAL_ASSERT(false,
4678 "mutating a non-functional tensor with a functional tensor is not allowed.",
4679 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4680 } else {
4681 // case 2: arguments are not functional tensors, so we no-op and redispatch.
4682 at::AutoDispatchSkipFunctionalize guard;
4683 at::Tensor tmp_output = at::_ops::exp_::call(self_);
4684 return self;
4685 }
4686 } else {
4687 at::Tensor tmp_output;
4688 {
4689 at::AutoDispatchSkipFunctionalize guard;
4690 tmp_output = at::_ops::exp::call(self_);
4691 }
4692 at::functionalization::impl::replace_(self, tmp_output);
4693 at::functionalization::impl::commit_update(self);
4694 at::functionalization::impl::sync(self);
4695 return self;
4696 }
4697 }
4698
4699 at::Tensor & expm1_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
4700 if (false) {
4701 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
4702 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
4703 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
4704 auto self_meta = to_meta(self);
4705 auto out_meta = to_meta(out);
4706 at::AutoDispatchSkipFunctionalize func_guard;
4707 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
4708 at::_ops::expm1_out::call(self_meta, out_meta);
4709 }
4710
4711 at::Tensor self_;
4712 if (at::functionalization::impl::isFunctionalTensor(self)) {
4713 at::functionalization::impl::sync(self);
4714 self_ = at::functionalization::impl::from_functional_tensor(self);
4715 } else {
4716 self_ = self;
4717 }
4718
4719 at::Tensor out_;
4720 if (at::functionalization::impl::isFunctionalTensor(out)) {
4721 at::functionalization::impl::sync(out);
4722 out_ = at::functionalization::impl::from_functional_tensor(out);
4723 } else {
4724 out_ = out;
4725 }
4726 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
4727 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
4728 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
4729 TORCH_INTERNAL_ASSERT(false,
4730 "mutating a non-functional tensor with a functional tensor is not allowed.",
4731 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4732 } else {
4733 // case 2: arguments are not functional tensors, so we no-op and redispatch.
4734 at::AutoDispatchSkipFunctionalize guard;
4735 at::Tensor tmp_output = at::_ops::expm1_out::call(self_, out_);
4736 return out;
4737 }
4738 } else {
4739 at::Tensor tmp_output;
4740 {
4741 at::AutoDispatchSkipFunctionalize guard;
4742 tmp_output = at::_ops::expm1::call(self_);
4743 }
4744 at::functionalization::impl::replace_(out, tmp_output);
4745 at::functionalization::impl::commit_update(out);
4746 at::functionalization::impl::sync(out);
4747 return out;
4748 }
4749 }
4750
4751 at::Tensor & expm1_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
4752 if (true) {
4753 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
4754 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
4755 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
4756 auto self_meta = to_meta(self);
4757 at::AutoDispatchSkipFunctionalize func_guard;
4758 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
4759 at::_ops::expm1_::call(self_meta);
4760 }
4761
4762 at::Tensor self_;
4763 if (at::functionalization::impl::isFunctionalTensor(self)) {
4764 at::functionalization::impl::sync(self);
4765 self_ = at::functionalization::impl::from_functional_tensor(self);
4766 } else {
4767 self_ = self;
4768 }
4769 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
4770 if ((false)) {
4771 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
4772 TORCH_INTERNAL_ASSERT(false,
4773 "mutating a non-functional tensor with a functional tensor is not allowed.",
4774 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4775 } else {
4776 // case 2: arguments are not functional tensors, so we no-op and redispatch.
4777 at::AutoDispatchSkipFunctionalize guard;
4778 at::Tensor tmp_output = at::_ops::expm1_::call(self_);
4779 return self;
4780 }
4781 } else {
4782 at::Tensor tmp_output;
4783 {
4784 at::AutoDispatchSkipFunctionalize guard;
4785 tmp_output = at::_ops::expm1::call(self_);
4786 }
4787 at::functionalization::impl::replace_(self, tmp_output);
4788 at::functionalization::impl::commit_update(self);
4789 at::functionalization::impl::sync(self);
4790 return self;
4791 }
4792 }
4793
4794 at::Tensor & fill_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & value, at::Tensor & out) {
4795 if (false) {
4796 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
4797 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
4798 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
4799 auto self_meta = to_meta(self);
4800 auto out_meta = to_meta(out);
4801 at::AutoDispatchSkipFunctionalize func_guard;
4802 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
4803 at::_ops::fill_Scalar_out::call(self_meta, value, out_meta);
4804 }
4805
4806 at::Tensor self_;
4807 if (at::functionalization::impl::isFunctionalTensor(self)) {
4808 at::functionalization::impl::sync(self);
4809 self_ = at::functionalization::impl::from_functional_tensor(self);
4810 } else {
4811 self_ = self;
4812 }
4813
4814 at::Tensor out_;
4815 if (at::functionalization::impl::isFunctionalTensor(out)) {
4816 at::functionalization::impl::sync(out);
4817 out_ = at::functionalization::impl::from_functional_tensor(out);
4818 } else {
4819 out_ = out;
4820 }
4821 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
4822 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
4823 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
4824 TORCH_INTERNAL_ASSERT(false,
4825 "mutating a non-functional tensor with a functional tensor is not allowed.",
4826 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4827 } else {
4828 // case 2: arguments are not functional tensors, so we no-op and redispatch.
4829 at::AutoDispatchSkipFunctionalize guard;
4830 at::Tensor tmp_output = at::_ops::fill_Scalar_out::call(self_, value, out_);
4831 return out;
4832 }
4833 } else {
4834 at::Tensor tmp_output;
4835 {
4836 at::AutoDispatchSkipFunctionalize guard;
4837 tmp_output = at::_ops::fill_Scalar::call(self_, value);
4838 }
4839 at::functionalization::impl::replace_(out, tmp_output);
4840 at::functionalization::impl::commit_update(out);
4841 at::functionalization::impl::sync(out);
4842 return out;
4843 }
4844 }
4845
4846 at::Tensor & fill__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & value) {
4847 if (true) {
4848 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
4849 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
4850 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
4851 auto self_meta = to_meta(self);
4852 at::AutoDispatchSkipFunctionalize func_guard;
4853 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
4854 at::_ops::fill__Scalar::call(self_meta, value);
4855 }
4856
4857 at::Tensor self_;
4858 if (at::functionalization::impl::isFunctionalTensor(self)) {
4859 at::functionalization::impl::sync(self);
4860 self_ = at::functionalization::impl::from_functional_tensor(self);
4861 } else {
4862 self_ = self;
4863 }
4864 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
4865 if ((false)) {
4866 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
4867 TORCH_INTERNAL_ASSERT(false,
4868 "mutating a non-functional tensor with a functional tensor is not allowed.",
4869 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4870 } else {
4871 // case 2: arguments are not functional tensors, so we no-op and redispatch.
4872 at::AutoDispatchSkipFunctionalize guard;
4873 at::Tensor tmp_output = at::_ops::fill__Scalar::call(self_, value);
4874 return self;
4875 }
4876 } else {
4877 at::Tensor tmp_output;
4878 {
4879 at::AutoDispatchSkipFunctionalize guard;
4880 tmp_output = at::_ops::fill_Scalar::call(self_, value);
4881 }
4882 at::functionalization::impl::replace_(self, tmp_output);
4883 at::functionalization::impl::commit_update(self);
4884 at::functionalization::impl::sync(self);
4885 return self;
4886 }
4887 }
4888
4889 at::Tensor & fill_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & value, at::Tensor & out) {
4890 if (false) {
4891 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
4892 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
4893 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
4894 auto self_meta = to_meta(self);
4895 auto value_meta = to_meta(value);
4896 auto out_meta = to_meta(out);
4897 at::AutoDispatchSkipFunctionalize func_guard;
4898 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
4899 at::_ops::fill_Tensor_out::call(self_meta, value_meta, out_meta);
4900 }
4901
4902 at::Tensor self_;
4903 if (at::functionalization::impl::isFunctionalTensor(self)) {
4904 at::functionalization::impl::sync(self);
4905 self_ = at::functionalization::impl::from_functional_tensor(self);
4906 } else {
4907 self_ = self;
4908 }
4909
4910 at::Tensor value_;
4911 if (at::functionalization::impl::isFunctionalTensor(value)) {
4912 at::functionalization::impl::sync(value);
4913 value_ = at::functionalization::impl::from_functional_tensor(value);
4914 } else {
4915 value_ = value;
4916 }
4917
4918 at::Tensor out_;
4919 if (at::functionalization::impl::isFunctionalTensor(out)) {
4920 at::functionalization::impl::sync(out);
4921 out_ = at::functionalization::impl::from_functional_tensor(out);
4922 } else {
4923 out_ = out;
4924 }
4925 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
4926 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(value))) {
4927 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
4928 TORCH_INTERNAL_ASSERT(false,
4929 "mutating a non-functional tensor with a functional tensor is not allowed.",
4930 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4931 } else {
4932 // case 2: arguments are not functional tensors, so we no-op and redispatch.
4933 at::AutoDispatchSkipFunctionalize guard;
4934 at::Tensor tmp_output = at::_ops::fill_Tensor_out::call(self_, value_, out_);
4935 return out;
4936 }
4937 } else {
4938 at::Tensor tmp_output;
4939 {
4940 at::AutoDispatchSkipFunctionalize guard;
4941 tmp_output = at::_ops::fill_Tensor::call(self_, value_);
4942 }
4943 at::functionalization::impl::replace_(out, tmp_output);
4944 at::functionalization::impl::commit_update(out);
4945 at::functionalization::impl::sync(out);
4946 return out;
4947 }
4948 }
4949
4950 at::Tensor & fill__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & value) {
4951 if (true) {
4952 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
4953 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
4954 // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
4955 auto self_meta = to_meta(self);
4956 auto value_meta = to_meta(value);
4957 at::AutoDispatchSkipFunctionalize func_guard;
4958 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
4959 at::_ops::fill__Tensor::call(self_meta, value_meta);
4960 }
4961
4962 at::Tensor self_;
4963 if (at::functionalization::impl::isFunctionalTensor(self)) {
4964 at::functionalization::impl::sync(self);
4965 self_ = at::functionalization::impl::from_functional_tensor(self);
4966 } else {
4967 self_ = self;
4968 }
4969
4970 at::Tensor value_;
4971 if (at::functionalization::impl::isFunctionalTensor(value)) {
4972 at::functionalization::impl::sync(value);
4973 value_ = at::functionalization::impl::from_functional_tensor(value);
4974 } else {
4975 value_ = value;
4976 }
4977 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
4978 if ((false || at::functionalization::impl::isFunctionalTensor(value))) {
4979 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
4980 TORCH_INTERNAL_ASSERT(false,
4981 "mutating a non-functional tensor with a functional tensor is not allowed.",
4982 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
4983 } else {
4984 // case 2: arguments are not functional tensors, so we no-op and redispatch.
4985 at::AutoDispatchSkipFunctionalize guard;
4986 at::Tensor tmp_output = at::_ops::fill__Tensor::call(self_, value_);
4987 return self;
4988 }
4989 } else {
4990 at::Tensor tmp_output;
4991 {
4992 at::AutoDispatchSkipFunctionalize guard;
4993 tmp_output = at::_ops::fill_Tensor::call(self_, value_);
4994 }
4995 at::functionalization::impl::replace_(self, tmp_output);
4996 at::functionalization::impl::commit_update(self);
4997 at::functionalization::impl::sync(self);
4998 return self;
4999 }
5000 }
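
// Illustrative note (not part of the generated kernels): the in-place wrappers
// such as fill__Tensor above differ from the out= wrappers in one way: they
// first replay the call on meta tensors (the `if (true)` block), so shape and
// dtype errors that the in-place op would raise are still reported once it is
// swapped for its functional variant. A rough end-to-end sketch, assuming the
// torch.func.functionalize entry point on the Python side:
//
//   // t = torch.ones(3)
//   // torch.func.functionalize(lambda x: x.fill_(torch.tensor(2.0)))(t)
//   //   -> x is a functional wrapper, so fill_ dispatches to fill__Tensor
//   //   -> tmp_output = at::_ops::fill_Tensor::call(self_, value_)
//   //   -> replace_/commit_update/sync fold the result back into `self`,
//   //      so the mutation is observed without any real in-place write.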
5001
5002 at::Tensor & floor_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
5003 if (false) {
5004 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
5005 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
5006 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
5007 auto self_meta = to_meta(self);
5008 auto out_meta = to_meta(out);
5009 at::AutoDispatchSkipFunctionalize func_guard;
5010 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
5011 at::_ops::floor_out::call(self_meta, out_meta);
5012 }
5013
5014 at::Tensor self_;
5015 if (at::functionalization::impl::isFunctionalTensor(self)) {
5016 at::functionalization::impl::sync(self);
5017 self_ = at::functionalization::impl::from_functional_tensor(self);
5018 } else {
5019 self_ = self;
5020 }
5021
5022 at::Tensor out_;
5023 if (at::functionalization::impl::isFunctionalTensor(out)) {
5024 at::functionalization::impl::sync(out);
5025 out_ = at::functionalization::impl::from_functional_tensor(out);
5026 } else {
5027 out_ = out;
5028 }
5029 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
5030 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
5031 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
5032 TORCH_INTERNAL_ASSERT(false,
5033 "mutating a non-functional tensor with a functional tensor is not allowed.",
5034 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
5035 } else {
5036 // case 2: arguments are not functional tensors, so we no-op and redispatch.
5037 at::AutoDispatchSkipFunctionalize guard;
5038 at::Tensor tmp_output = at::_ops::floor_out::call(self_, out_);
5039 return out;
5040 }
5041 } else {
5042 at::Tensor tmp_output;
5043 {
5044 at::AutoDispatchSkipFunctionalize guard;
5045 tmp_output = at::_ops::floor::call(self_);
5046 }
5047 at::functionalization::impl::replace_(out, tmp_output);
5048 at::functionalization::impl::commit_update(out);
5049 at::functionalization::impl::sync(out);
5050 return out;
5051 }
5052 }
5053
5054 at::Tensor & floor_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
5055 if (true) {
5056 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
5057 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
5058 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
5059 auto self_meta = to_meta(self);
5060 at::AutoDispatchSkipFunctionalize func_guard;
5061 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
5062 at::_ops::floor_::call(self_meta);
5063 }
5064
5065 at::Tensor self_;
5066 if (at::functionalization::impl::isFunctionalTensor(self)) {
5067 at::functionalization::impl::sync(self);
5068 self_ = at::functionalization::impl::from_functional_tensor(self);
5069 } else {
5070 self_ = self;
5071 }
5072 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
5073 if ((false)) {
5074 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
5075 TORCH_INTERNAL_ASSERT(false,
5076 "mutating a non-functional tensor with a functional tensor is not allowed.",
5077 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
5078 } else {
5079 // case 2: arguments are not functional tensors, so we no-op and redispatch.
5080 at::AutoDispatchSkipFunctionalize guard;
5081 at::Tensor tmp_output = at::_ops::floor_::call(self_);
5082 return self;
5083 }
5084 } else {
5085 at::Tensor tmp_output;
5086 {
5087 at::AutoDispatchSkipFunctionalize guard;
5088 tmp_output = at::_ops::floor::call(self_);
5089 }
5090 at::functionalization::impl::replace_(self, tmp_output);
5091 at::functionalization::impl::commit_update(self);
5092 at::functionalization::impl::sync(self);
5093 return self;
5094 }
5095 }
5096
5097 at::Tensor & floor_divide_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
5098 if (false) {
5099 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
5100 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
5101 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
5102 auto self_meta = to_meta(self);
5103 auto other_meta = to_meta(other);
5104 auto out_meta = to_meta(out);
5105 at::AutoDispatchSkipFunctionalize func_guard;
5106 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
5107 at::_ops::floor_divide_out::call(self_meta, other_meta, out_meta);
5108 }
5109
5110 at::Tensor self_;
5111 if (at::functionalization::impl::isFunctionalTensor(self)) {
5112 at::functionalization::impl::sync(self);
5113 self_ = at::functionalization::impl::from_functional_tensor(self);
5114 } else {
5115 self_ = self;
5116 }
5117
5118 at::Tensor other_;
5119 if (at::functionalization::impl::isFunctionalTensor(other)) {
5120 at::functionalization::impl::sync(other);
5121 other_ = at::functionalization::impl::from_functional_tensor(other);
5122 } else {
5123 other_ = other;
5124 }
5125
5126 at::Tensor out_;
5127 if (at::functionalization::impl::isFunctionalTensor(out)) {
5128 at::functionalization::impl::sync(out);
5129 out_ = at::functionalization::impl::from_functional_tensor(out);
5130 } else {
5131 out_ = out;
5132 }
5133 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
5134 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
5135 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
5136 TORCH_INTERNAL_ASSERT(false,
5137 "mutating a non-functional tensor with a functional tensor is not allowed.",
5138 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
5139 } else {
5140 // case 2: arguments are not functional tensors, so we no-op and redispatch.
5141 at::AutoDispatchSkipFunctionalize guard;
5142 at::Tensor tmp_output = at::_ops::floor_divide_out::call(self_, other_, out_);
5143 return out;
5144 }
5145 } else {
5146 at::Tensor tmp_output;
5147 {
5148 at::AutoDispatchSkipFunctionalize guard;
5149 tmp_output = at::_ops::floor_divide::call(self_, other_);
5150 }
5151 at::functionalization::impl::replace_(out, tmp_output);
5152 at::functionalization::impl::commit_update(out);
5153 at::functionalization::impl::sync(out);
5154 return out;
5155 }
5156 }
5157
5158 at::Tensor & floor_divide__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
5159 if (true) {
5160 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
5161 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
5162 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
5163 auto self_meta = to_meta(self);
5164 auto other_meta = to_meta(other);
5165 at::AutoDispatchSkipFunctionalize func_guard;
5166 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
5167 at::_ops::floor_divide__Tensor::call(self_meta, other_meta);
5168 }
5169
5170 at::Tensor self_;
5171 if (at::functionalization::impl::isFunctionalTensor(self)) {
5172 at::functionalization::impl::sync(self);
5173 self_ = at::functionalization::impl::from_functional_tensor(self);
5174 } else {
5175 self_ = self;
5176 }
5177
5178 at::Tensor other_;
5179 if (at::functionalization::impl::isFunctionalTensor(other)) {
5180 at::functionalization::impl::sync(other);
5181 other_ = at::functionalization::impl::from_functional_tensor(other);
5182 } else {
5183 other_ = other;
5184 }
5185 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
5186 if ((false || at::functionalization::impl::isFunctionalTensor(other))) {
5187 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
5188 TORCH_INTERNAL_ASSERT(false,
5189 "mutating a non-functional tensor with a functional tensor is not allowed.",
5190 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
5191 } else {
5192 // case 2: arguments are not functional tensors, so we no-op and redispatch.
5193 at::AutoDispatchSkipFunctionalize guard;
5194 at::Tensor tmp_output = at::_ops::floor_divide__Tensor::call(self_, other_);
5195 return self;
5196 }
5197 } else {
5198 at::Tensor tmp_output;
5199 {
5200 at::AutoDispatchSkipFunctionalize guard;
5201 tmp_output = at::_ops::floor_divide::call(self_, other_);
5202 }
5203 at::functionalization::impl::replace_(self, tmp_output);
5204 at::functionalization::impl::commit_update(self);
5205 at::functionalization::impl::sync(self);
5206 return self;
5207 }
5208 }
5209
5210 at::Tensor & full_out_names_out(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, at::Tensor & out) {
5211 if (false) {
5212 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
5213 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
5214 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
5215 auto out_meta = to_meta(out);
5216 at::AutoDispatchSkipFunctionalize func_guard;
5217 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
5218 at::_ops::full_names_out::call(size, fill_value, names, out_meta);
5219 }
5220
5221 at::Tensor out_;
5222 if (at::functionalization::impl::isFunctionalTensor(out)) {
5223 at::functionalization::impl::sync(out);
5224 out_ = at::functionalization::impl::from_functional_tensor(out);
5225 } else {
5226 out_ = out;
5227 }
5228 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
5229 if ((false)) {
5230 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
5231 TORCH_INTERNAL_ASSERT(false,
5232 "mutating a non-functional tensor with a functional tensor is not allowed.",
5233 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
5234 } else {
5235 // case 2: arguments are not functional tensors, so we no-op and redispatch.
5236 at::AutoDispatchSkipFunctionalize guard;
5237 at::Tensor tmp_output = at::_ops::full_names_out::call(size, fill_value, names, out_);
5238 return out;
5239 }
5240 } else {
5241 at::Tensor tmp_output;
5242 {
5243 at::AutoDispatchSkipFunctionalize guard;
5244 tmp_output = at::_ops::full_names::call(size, fill_value, names, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt);
5245 }
5246 at::functionalization::impl::replace_(out, tmp_output);
5247 at::functionalization::impl::commit_update(out);
5248 at::functionalization::impl::sync(out);
5249 return out;
5250 }
5251 }
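
// Illustrative note (not part of the generated kernels): factory-style out=
// wrappers such as full_out_names_out above have no Tensor inputs, so only
// `out` itself needs unwrapping. When `out` is functional, the functional
// factory overload is called with TensorOptions rebuilt from `out_` (dtype,
// layout, device), and pin_memory left as c10::nullopt:
//
//   // tmp_output = at::_ops::full_names::call(size, fill_value, names,
//   //     out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt);
//
// The freshly allocated tensor is then committed back into `out` exactly as
// in the non-factory out= kernels.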
5252
5253 at::Tensor & full_out_out(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
5254 if (false) {
5255 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
5256 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
5257 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
5258 auto out_meta = to_meta(out);
5259 at::AutoDispatchSkipFunctionalize func_guard;
5260 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
5261 at::_ops::full_out::call(size, fill_value, out_meta);
5262 }
5263
5264 at::Tensor out_;
5265 if (at::functionalization::impl::isFunctionalTensor(out)) {
5266 at::functionalization::impl::sync(out);
5267 out_ = at::functionalization::impl::from_functional_tensor(out);
5268 } else {
5269 out_ = out;
5270 }
5271 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
5272 if ((false)) {
5273 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
5274 TORCH_INTERNAL_ASSERT(false,
5275 "mutating a non-functional tensor with a functional tensor is not allowed.",
5276 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
5277 } else {
5278 // case 2: arguments are not functional tensors, so we no-op and redispatch.
5279 at::AutoDispatchSkipFunctionalize guard;
5280 at::Tensor tmp_output = at::_ops::full_out::call(size, fill_value, out_);
5281 return out;
5282 }
5283 } else {
5284 at::Tensor tmp_output;
5285 {
5286 at::AutoDispatchSkipFunctionalize guard;
5287 tmp_output = at::_ops::full::call(size, fill_value, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt);
5288 }
5289 at::functionalization::impl::replace_(out, tmp_output);
5290 at::functionalization::impl::commit_update(out);
5291 at::functionalization::impl::sync(out);
5292 return out;
5293 }
5294 }
5295
5296 at::Tensor & from_file_out_out(c10::DispatchKeySet dispatchKeySet, c10::string_view filename, c10::optional<bool> shared, c10::optional<int64_t> size, at::Tensor & out) {
5297 if (false) {
5298 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
5299 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
5300 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
5301 auto out_meta = to_meta(out);
5302 at::AutoDispatchSkipFunctionalize func_guard;
5303 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
5304 at::_ops::from_file_out::call(filename, shared, size, out_meta);
5305 }
5306
5307 at::Tensor out_;
5308 if (at::functionalization::impl::isFunctionalTensor(out)) {
5309 at::functionalization::impl::sync(out);
5310 out_ = at::functionalization::impl::from_functional_tensor(out);
5311 } else {
5312 out_ = out;
5313 }
5314 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
5315 if ((false)) {
5316 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
5317 TORCH_INTERNAL_ASSERT(false,
5318 "mutating a non-functional tensor with a functional tensor is not allowed.",
5319 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
5320 } else {
5321 // case 2: arguments are not functional tensors, so we no-op and redispatch.
5322 at::AutoDispatchSkipFunctionalize guard;
5323 at::Tensor tmp_output = at::_ops::from_file_out::call(filename, shared, size, out_);
5324 return out;
5325 }
5326 } else {
5327 at::Tensor tmp_output;
5328 {
5329 at::AutoDispatchSkipFunctionalize guard;
5330 tmp_output = at::_ops::from_file::call(filename, shared, size, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt);
5331 }
5332 at::functionalization::impl::replace_(out, tmp_output);
5333 at::functionalization::impl::commit_update(out);
5334 at::functionalization::impl::sync(out);
5335 return out;
5336 }
5337 }
5338
5339 at::Tensor & grid_sampler_2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {
5340 if (false) {
5341 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
5342 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
5343 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
5344 auto input_meta = to_meta(input);
5345 auto grid_meta = to_meta(grid);
5346 auto out_meta = to_meta(out);
5347 at::AutoDispatchSkipFunctionalize func_guard;
5348 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
5349 at::_ops::grid_sampler_2d_out::call(input_meta, grid_meta, interpolation_mode, padding_mode, align_corners, out_meta);
5350 }
5351
5352 at::Tensor input_;
5353 if (at::functionalization::impl::isFunctionalTensor(input)) {
5354 at::functionalization::impl::sync(input);
5355 input_ = at::functionalization::impl::from_functional_tensor(input);
5356 } else {
5357 input_ = input;
5358 }
5359
5360 at::Tensor grid_;
5361 if (at::functionalization::impl::isFunctionalTensor(grid)) {
5362 at::functionalization::impl::sync(grid);
5363 grid_ = at::functionalization::impl::from_functional_tensor(grid);
5364 } else {
5365 grid_ = grid;
5366 }
5367
5368 at::Tensor out_;
5369 if (at::functionalization::impl::isFunctionalTensor(out)) {
5370 at::functionalization::impl::sync(out);
5371 out_ = at::functionalization::impl::from_functional_tensor(out);
5372 } else {
5373 out_ = out;
5374 }
5375 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
5376 if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(grid))) {
5377 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
5378 TORCH_INTERNAL_ASSERT(false,
5379 "mutating a non-functional tensor with a functional tensor is not allowed.",
5380 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
5381 } else {
5382 // case 2: arguments are not functional tensors, so we no-op and redispatch.
5383 at::AutoDispatchSkipFunctionalize guard;
5384 at::Tensor tmp_output = at::_ops::grid_sampler_2d_out::call(input_, grid_, interpolation_mode, padding_mode, align_corners, out_);
5385 return out;
5386 }
5387 } else {
5388 at::Tensor tmp_output;
5389 {
5390 at::AutoDispatchSkipFunctionalize guard;
5391 tmp_output = at::_ops::grid_sampler_2d::call(input_, grid_, interpolation_mode, padding_mode, align_corners);
5392 }
5393 at::functionalization::impl::replace_(out, tmp_output);
5394 at::functionalization::impl::commit_update(out);
5395 at::functionalization::impl::sync(out);
5396 return out;
5397 }
5398 }
5399
5400 at::Tensor & grid_sampler_3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {
5401 if (false) {
5402 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
5403 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
5404 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
5405 auto input_meta = to_meta(input);
5406 auto grid_meta = to_meta(grid);
5407 auto out_meta = to_meta(out);
5408 at::AutoDispatchSkipFunctionalize func_guard;
5409 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
5410 at::_ops::grid_sampler_3d_out::call(input_meta, grid_meta, interpolation_mode, padding_mode, align_corners, out_meta);
5411 }
5412
5413 at::Tensor input_;
5414 if (at::functionalization::impl::isFunctionalTensor(input)) {
5415 at::functionalization::impl::sync(input);
5416 input_ = at::functionalization::impl::from_functional_tensor(input);
5417 } else {
5418 input_ = input;
5419 }
5420
5421 at::Tensor grid_;
5422 if (at::functionalization::impl::isFunctionalTensor(grid)) {
5423 at::functionalization::impl::sync(grid);
5424 grid_ = at::functionalization::impl::from_functional_tensor(grid);
5425 } else {
5426 grid_ = grid;
5427 }
5428
5429 at::Tensor out_;
5430 if (at::functionalization::impl::isFunctionalTensor(out)) {
5431 at::functionalization::impl::sync(out);
5432 out_ = at::functionalization::impl::from_functional_tensor(out);
5433 } else {
5434 out_ = out;
5435 }
5436 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
5437 if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(grid))) {
5438 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
5439 TORCH_INTERNAL_ASSERT(false,
5440 "mutating a non-functional tensor with a functional tensor is not allowed.",
5441 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
5442 } else {
5443 // case 2: arguments are not functional tensors, so we no-op and redispatch.
5444 at::AutoDispatchSkipFunctionalize guard;
5445 at::Tensor tmp_output = at::_ops::grid_sampler_3d_out::call(input_, grid_, interpolation_mode, padding_mode, align_corners, out_);
5446 return out;
5447 }
5448 } else {
5449 at::Tensor tmp_output;
5450 {
5451 at::AutoDispatchSkipFunctionalize guard;
5452 tmp_output = at::_ops::grid_sampler_3d::call(input_, grid_, interpolation_mode, padding_mode, align_corners);
5453 }
5454 at::functionalization::impl::replace_(out, tmp_output);
5455 at::functionalization::impl::commit_update(out);
5456 at::functionalization::impl::sync(out);
5457 return out;
5458 }
5459 }
5460
5461 at::Tensor & hamming_window_out_out(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out) {
5462 if (false) {
5463 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
5464 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
5465 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
5466 auto out_meta = to_meta(out);
5467 at::AutoDispatchSkipFunctionalize func_guard;
5468 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
5469 at::_ops::hamming_window_out::call(window_length, out_meta);
5470 }
5471
5472 at::Tensor out_;
5473 if (at::functionalization::impl::isFunctionalTensor(out)) {
5474 at::functionalization::impl::sync(out);
5475 out_ = at::functionalization::impl::from_functional_tensor(out);
5476 } else {
5477 out_ = out;
5478 }
5479 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
5480 if ((false)) {
5481 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
5482 TORCH_INTERNAL_ASSERT(false,
5483 "mutating a non-functional tensor with a functional tensor is not allowed.",
5484 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
5485 } else {
5486 // case 2: arguments are not functional tensors, so we no-op and redispatch.
5487 at::AutoDispatchSkipFunctionalize guard;
5488 at::Tensor tmp_output = at::_ops::hamming_window_out::call(window_length, out_);
5489 return out;
5490 }
5491 } else {
5492 at::Tensor tmp_output;
5493 {
5494 at::AutoDispatchSkipFunctionalize guard;
5495 tmp_output = at::_ops::hamming_window::call(window_length, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt);
5496 }
5497 at::functionalization::impl::replace_(out, tmp_output);
5498 at::functionalization::impl::commit_update(out);
5499 at::functionalization::impl::sync(out);
5500 return out;
5501 }
5502 }
5503
5504 at::Tensor & hamming_window_out_periodic_out(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out) {
5505 if (false) {
5506 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
5507 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
5508 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
5509 auto out_meta = to_meta(out);
5510 at::AutoDispatchSkipFunctionalize func_guard;
5511 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
5512 at::_ops::hamming_window_periodic_out::call(window_length, periodic, out_meta);
5513 }
5514
5515 at::Tensor out_;
5516 if (at::functionalization::impl::isFunctionalTensor(out)) {
5517 at::functionalization::impl::sync(out);
5518 out_ = at::functionalization::impl::from_functional_tensor(out);
5519 } else {
5520 out_ = out;
5521 }
5522 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
5523 if ((false)) {
5524 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
5525 TORCH_INTERNAL_ASSERT(false,
5526 "mutating a non-functional tensor with a functional tensor is not allowed.",
5527 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
5528 } else {
5529 // case 2: arguments are not functional tensors, so we no-op and redispatch.
5530 at::AutoDispatchSkipFunctionalize guard;
5531 at::Tensor tmp_output = at::_ops::hamming_window_periodic_out::call(window_length, periodic, out_);
5532 return out;
5533 }
5534 } else {
5535 at::Tensor tmp_output;
5536 {
5537 at::AutoDispatchSkipFunctionalize guard;
5538 tmp_output = at::_ops::hamming_window_periodic::call(window_length, periodic, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt);
5539 }
5540 at::functionalization::impl::replace_(out, tmp_output);
5541 at::functionalization::impl::commit_update(out);
5542 at::functionalization::impl::sync(out);
5543 return out;
5544 }
5545 }
5546
5547 at::Tensor & hamming_window_out_periodic_alpha_out(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, at::Tensor & out) {
5548 if (false) {
5549 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
5550 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
5551 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
5552 auto out_meta = to_meta(out);
5553 at::AutoDispatchSkipFunctionalize func_guard;
5554 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
5555 at::_ops::hamming_window_periodic_alpha_out::call(window_length, periodic, alpha, out_meta);
5556 }
5557
5558 at::Tensor out_;
5559 if (at::functionalization::impl::isFunctionalTensor(out)) {
5560 at::functionalization::impl::sync(out);
5561 out_ = at::functionalization::impl::from_functional_tensor(out);
5562 } else {
5563 out_ = out;
5564 }
5565 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
5566 if ((false)) {
5567 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
5568 TORCH_INTERNAL_ASSERT(false,
5569 "mutating a non-functional tensor with a functional tensor is not allowed.",
5570 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
5571 } else {
5572 // case 2: arguments are not functional tensors, so we no-op and redispatch.
5573 at::AutoDispatchSkipFunctionalize guard;
5574 at::Tensor tmp_output = at::_ops::hamming_window_periodic_alpha_out::call(window_length, periodic, alpha, out_);
5575 return out;
5576 }
5577 } else {
5578 at::Tensor tmp_output;
5579 {
5580 at::AutoDispatchSkipFunctionalize guard;
5581 tmp_output = at::_ops::hamming_window_periodic_alpha::call(window_length, periodic, alpha, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt);
5582 }
5583 at::functionalization::impl::replace_(out, tmp_output);
5584 at::functionalization::impl::commit_update(out);
5585 at::functionalization::impl::sync(out);
5586 return out;
5587 }
5588 }
5589
5590 at::Tensor & hamming_window_out_periodic_alpha_beta_out(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, double beta, at::Tensor & out) {
5591 if (false) {
5592 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
5593 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
5594 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
5595 auto out_meta = to_meta(out);
5596 at::AutoDispatchSkipFunctionalize func_guard;
5597 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
5598 at::_ops::hamming_window_periodic_alpha_beta_out::call(window_length, periodic, alpha, beta, out_meta);
5599 }
5600
5601 at::Tensor out_;
5602 if (at::functionalization::impl::isFunctionalTensor(out)) {
5603 at::functionalization::impl::sync(out);
5604 out_ = at::functionalization::impl::from_functional_tensor(out);
5605 } else {
5606 out_ = out;
5607 }
5608 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
5609 if ((false)) {
5610 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
5611 TORCH_INTERNAL_ASSERT(false,
5612 "mutating a non-functional tensor with a functional tensor is not allowed.",
5613 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
5614 } else {
5615 // case 2: arguments are not functional tensors, so we no-op and redispatch.
5616 at::AutoDispatchSkipFunctionalize guard;
5617 at::Tensor tmp_output = at::_ops::hamming_window_periodic_alpha_beta_out::call(window_length, periodic, alpha, beta, out_);
5618 return out;
5619 }
5620 } else {
5621 at::Tensor tmp_output;
5622 {
5623 at::AutoDispatchSkipFunctionalize guard;
5624 tmp_output = at::_ops::hamming_window_periodic_alpha_beta::call(window_length, periodic, alpha, beta, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt);
5625 }
5626 at::functionalization::impl::replace_(out, tmp_output);
5627 at::functionalization::impl::commit_update(out);
5628 at::functionalization::impl::sync(out);
5629 return out;
5630 }
5631 }
5632
5633 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
5634 if (false) {
5635 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
5636 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
5637 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
5638 auto input_meta = to_meta(input);
5639 auto weight_meta = to_meta(weight);
5640 auto bias_meta = to_meta(bias);
5641 auto out0_meta = to_meta(out0);
5642 auto out1_meta = to_meta(out1);
5643 auto out2_meta = to_meta(out2);
5644 at::AutoDispatchSkipFunctionalize func_guard;
5645 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
5646 at::_ops::native_group_norm_out::call(input_meta, weight_meta, bias_meta, N, C, HxW, group, eps, out0_meta, out1_meta, out2_meta);
5647 }
5648
5649 at::Tensor input_;
5650 if (at::functionalization::impl::isFunctionalTensor(input)) {
5651 at::functionalization::impl::sync(input);
5652 input_ = at::functionalization::impl::from_functional_tensor(input);
5653 } else {
5654 input_ = input;
5655 }
5656
5657 c10::optional<at::Tensor> weight_;
5658 if (at::functionalization::impl::isFunctionalTensor(weight)) {
5659 at::functionalization::impl::sync(weight);
5660 weight_ = at::functionalization::impl::from_functional_tensor(weight);
5661 } else {
5662 weight_ = weight;
5663 }
5664
5665 c10::optional<at::Tensor> bias_;
5666 if (at::functionalization::impl::isFunctionalTensor(bias)) {
5667 at::functionalization::impl::sync(bias);
5668 bias_ = at::functionalization::impl::from_functional_tensor(bias);
5669 } else {
5670 bias_ = bias;
5671 }
5672
5673 at::Tensor out0_;
5674 if (at::functionalization::impl::isFunctionalTensor(out0)) {
5675 at::functionalization::impl::sync(out0);
5676 out0_ = at::functionalization::impl::from_functional_tensor(out0);
5677 } else {
5678 out0_ = out0;
5679 }
5680
5681 at::Tensor out1_;
5682 if (at::functionalization::impl::isFunctionalTensor(out1)) {
5683 at::functionalization::impl::sync(out1);
5684 out1_ = at::functionalization::impl::from_functional_tensor(out1);
5685 } else {
5686 out1_ = out1;
5687 }
5688
5689 at::Tensor out2_;
5690 if (at::functionalization::impl::isFunctionalTensor(out2)) {
5691 at::functionalization::impl::sync(out2);
5692 out2_ = at::functionalization::impl::from_functional_tensor(out2);
5693 } else {
5694 out2_ = out2;
5695 }
5696 if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
5697 if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
5698 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
5699 TORCH_INTERNAL_ASSERT(false,
5700 "mutating a non-functional tensor with a functional tensor is not allowed.",
5701 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
5702 } else {
5703 // case 2: arguments are not functional tensors, so we no-op and redispatch.
5704 at::AutoDispatchSkipFunctionalize guard;
5705 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::native_group_norm_out::call(input_, weight_, bias_, N, C, HxW, group, eps, out0_, out1_, out2_);
5706 return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
5707 }
5708 } else {
5709 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
5710 {
5711 at::AutoDispatchSkipFunctionalize guard;
5712 tmp_output = at::_ops::native_group_norm::call(input_, weight_, bias_, N, C, HxW, group, eps);
5713 }
5714 at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
5715 at::functionalization::impl::commit_update(out0);
5716 at::functionalization::impl::sync(out0);
5717 at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
5718 at::functionalization::impl::commit_update(out1);
5719 at::functionalization::impl::sync(out1);
5720 at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
5721 at::functionalization::impl::commit_update(out2);
5722 at::functionalization::impl::sync(out2);
5723 return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
5724 }
5725 }
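
// Illustrative note (not part of the generated kernels): multi-output out=
// wrappers such as native_group_norm_out_out above receive a std::tuple from
// the functional variant and commit each element into its matching out
// argument independently:
//
//   // auto tmp_output = at::_ops::native_group_norm::call(...);
//   // replace_(out0, std::get<0>(tmp_output)); commit_update(out0); sync(out0);
//   // ... and likewise for out1 and out2.
//
// The functional path is only taken when out0, out1 and out2 are all
// functional tensors; otherwise the original op is redispatched unchanged.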
5726
5727 at::Tensor & _fft_c2r_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size, at::Tensor & out) {
5728 if (false) {
5729 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
5730 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
5731 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
5732 auto self_meta = to_meta(self);
5733 auto out_meta = to_meta(out);
5734 at::AutoDispatchSkipFunctionalize func_guard;
5735 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
5736 at::_ops::_fft_c2r_out::call(self_meta, dim, normalization, last_dim_size, out_meta);
5737 }
5738
5739 at::Tensor self_;
5740 if (at::functionalization::impl::isFunctionalTensor(self)) {
5741 at::functionalization::impl::sync(self);
5742 self_ = at::functionalization::impl::from_functional_tensor(self);
5743 } else {
5744 self_ = self;
5745 }
5746
5747 at::Tensor out_;
5748 if (at::functionalization::impl::isFunctionalTensor(out)) {
5749 at::functionalization::impl::sync(out);
5750 out_ = at::functionalization::impl::from_functional_tensor(out);
5751 } else {
5752 out_ = out;
5753 }
5754 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
5755 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
5756 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
5757 TORCH_INTERNAL_ASSERT(false,
5758 "mutating a non-functional tensor with a functional tensor is not allowed.",
5759 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
5760 } else {
5761 // case 2: arguments are not functional tensors, so we no-op and redispatch.
5762 at::AutoDispatchSkipFunctionalize guard;
5763 at::Tensor tmp_output = at::_ops::_fft_c2r_out::call(self_, dim, normalization, last_dim_size, out_);
5764 return out;
5765 }
5766 } else {
5767 at::Tensor tmp_output;
5768 {
5769 at::AutoDispatchSkipFunctionalize guard;
5770 tmp_output = at::_ops::_fft_c2r::call(self_, dim, normalization, last_dim_size);
5771 }
5772 at::functionalization::impl::replace_(out, tmp_output);
5773 at::functionalization::impl::commit_update(out);
5774 at::functionalization::impl::sync(out);
5775 return out;
5776 }
5777 }
5778
5779 at::Tensor & isnan_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
5780 if (false) {
5781 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
5782 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
5783 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
5784 auto self_meta = to_meta(self);
5785 auto out_meta = to_meta(out);
5786 at::AutoDispatchSkipFunctionalize func_guard;
5787 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
5788 at::_ops::isnan_out::call(self_meta, out_meta);
5789 }
5790
5791 at::Tensor self_;
5792 if (at::functionalization::impl::isFunctionalTensor(self)) {
5793 at::functionalization::impl::sync(self);
5794 self_ = at::functionalization::impl::from_functional_tensor(self);
5795 } else {
5796 self_ = self;
5797 }
5798
5799 at::Tensor out_;
5800 if (at::functionalization::impl::isFunctionalTensor(out)) {
5801 at::functionalization::impl::sync(out);
5802 out_ = at::functionalization::impl::from_functional_tensor(out);
5803 } else {
5804 out_ = out;
5805 }
5806 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
5807 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
5808 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
5809 TORCH_INTERNAL_ASSERT(false,
5810 "mutating a non-functional tensor with a functional tensor is not allowed.",
5811 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
5812 } else {
5813 // case 2: arguments are not functional tensors, so we no-op and redispatch.
5814 at::AutoDispatchSkipFunctionalize guard;
5815 at::Tensor tmp_output = at::_ops::isnan_out::call(self_, out_);
5816 return out;
5817 }
5818 } else {
5819 at::Tensor tmp_output;
5820 {
5821 at::AutoDispatchSkipFunctionalize guard;
5822 tmp_output = at::_ops::isnan::call(self_);
5823 }
5824 at::functionalization::impl::replace_(out, tmp_output);
5825 at::functionalization::impl::commit_update(out);
5826 at::functionalization::impl::sync(out);
5827 return out;
5828 }
5829 }
5830
5831 at::Tensor & ldexp_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
5832 if (false) {
5833 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
5834 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
5835 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
5836 auto self_meta = to_meta(self);
5837 auto other_meta = to_meta(other);
5838 auto out_meta = to_meta(out);
5839 at::AutoDispatchSkipFunctionalize func_guard;
5840 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
5841 at::_ops::ldexp_out::call(self_meta, other_meta, out_meta);
5842 }
5843
5844 at::Tensor self_;
5845 if (at::functionalization::impl::isFunctionalTensor(self)) {
5846 at::functionalization::impl::sync(self);
5847 self_ = at::functionalization::impl::from_functional_tensor(self);
5848 } else {
5849 self_ = self;
5850 }
5851
5852 at::Tensor other_;
5853 if (at::functionalization::impl::isFunctionalTensor(other)) {
5854 at::functionalization::impl::sync(other);
5855 other_ = at::functionalization::impl::from_functional_tensor(other);
5856 } else {
5857 other_ = other;
5858 }
5859
5860 at::Tensor out_;
5861 if (at::functionalization::impl::isFunctionalTensor(out)) {
5862 at::functionalization::impl::sync(out);
5863 out_ = at::functionalization::impl::from_functional_tensor(out);
5864 } else {
5865 out_ = out;
5866 }
5867 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
5868 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
5869 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
5870 TORCH_INTERNAL_ASSERT(false,
5871 "mutating a non-functional tensor with a functional tensor is not allowed.",
5872 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
5873 } else {
5874 // case 2: arguments are not functional tensors, so we no-op and redispatch.
5875 at::AutoDispatchSkipFunctionalize guard;
5876 at::Tensor tmp_output = at::_ops::ldexp_out::call(self_, other_, out_);
5877 return out;
5878 }
5879 } else {
5880 at::Tensor tmp_output;
5881 {
5882 at::AutoDispatchSkipFunctionalize guard;
5883 tmp_output = at::_ops::ldexp_Tensor::call(self_, other_);
5884 }
5885 at::functionalization::impl::replace_(out, tmp_output);
5886 at::functionalization::impl::commit_update(out);
5887 at::functionalization::impl::sync(out);
5888 return out;
5889 }
5890 }
5891
5892 at::Tensor & ldexp_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
5893 if (true) {
5894 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
5895 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
5896 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
5897 auto self_meta = to_meta(self);
5898 auto other_meta = to_meta(other);
5899 at::AutoDispatchSkipFunctionalize func_guard;
5900 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
5901 at::_ops::ldexp_::call(self_meta, other_meta);
5902 }
5903
5904 at::Tensor self_;
5905 if (at::functionalization::impl::isFunctionalTensor(self)) {
5906 at::functionalization::impl::sync(self);
5907 self_ = at::functionalization::impl::from_functional_tensor(self);
5908 } else {
5909 self_ = self;
5910 }
5911
5912 at::Tensor other_;
5913 if (at::functionalization::impl::isFunctionalTensor(other)) {
5914 at::functionalization::impl::sync(other);
5915 other_ = at::functionalization::impl::from_functional_tensor(other);
5916 } else {
5917 other_ = other;
5918 }
5919 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
5920 if ((false || at::functionalization::impl::isFunctionalTensor(other))) {
5921 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
5922 TORCH_INTERNAL_ASSERT(false,
5923 "mutating a non-functional tensor with a functional tensor is not allowed.",
5924 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
5925 } else {
5926 // case 2: arguments are not functional tensors, so we no-op and redispatch.
5927 at::AutoDispatchSkipFunctionalize guard;
5928 at::Tensor tmp_output = at::_ops::ldexp_::call(self_, other_);
5929 return self;
5930 }
5931 } else {
5932 at::Tensor tmp_output;
5933 {
5934 at::AutoDispatchSkipFunctionalize guard;
5935 tmp_output = at::_ops::ldexp_Tensor::call(self_, other_);
5936 }
5937 at::functionalization::impl::replace_(self, tmp_output);
5938 at::functionalization::impl::commit_update(self);
5939 at::functionalization::impl::sync(self);
5940 return self;
5941 }
5942 }
5943
5944 at::Tensor & log2_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
5945 if (false) {
5946 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
5947 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
5948 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
5949 auto self_meta = to_meta(self);
5950 auto out_meta = to_meta(out);
5951 at::AutoDispatchSkipFunctionalize func_guard;
5952 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
5953 at::_ops::log2_out::call(self_meta, out_meta);
5954 }
5955
5956 at::Tensor self_;
5957 if (at::functionalization::impl::isFunctionalTensor(self)) {
5958 at::functionalization::impl::sync(self);
5959 self_ = at::functionalization::impl::from_functional_tensor(self);
5960 } else {
5961 self_ = self;
5962 }
5963
5964 at::Tensor out_;
5965 if (at::functionalization::impl::isFunctionalTensor(out)) {
5966 at::functionalization::impl::sync(out);
5967 out_ = at::functionalization::impl::from_functional_tensor(out);
5968 } else {
5969 out_ = out;
5970 }
5971 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
5972 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
5973 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
5974 TORCH_INTERNAL_ASSERT(false,
5975 "mutating a non-functional tensor with a functional tensor is not allowed.",
5976 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
5977 } else {
5978 // case 2: arguments are not functional tensors, so we no-op and redispatch.
5979 at::AutoDispatchSkipFunctionalize guard;
5980 at::Tensor tmp_output = at::_ops::log2_out::call(self_, out_);
5981 return out;
5982 }
5983 } else {
5984 at::Tensor tmp_output;
5985 {
5986 at::AutoDispatchSkipFunctionalize guard;
5987 tmp_output = at::_ops::log2::call(self_);
5988 }
5989 at::functionalization::impl::replace_(out, tmp_output);
5990 at::functionalization::impl::commit_update(out);
5991 at::functionalization::impl::sync(out);
5992 return out;
5993 }
5994 }
5995
5996 at::Tensor & log2_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
5997 if (true) {
5998 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
5999 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
6000 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
6001 auto self_meta = to_meta(self);
6002 at::AutoDispatchSkipFunctionalize func_guard;
6003 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
6004 at::_ops::log2_::call(self_meta);
6005 }
6006
6007 at::Tensor self_;
6008 if (at::functionalization::impl::isFunctionalTensor(self)) {
6009 at::functionalization::impl::sync(self);
6010 self_ = at::functionalization::impl::from_functional_tensor(self);
6011 } else {
6012 self_ = self;
6013 }
6014 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
6015 if ((false)) {
6016 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
6017 TORCH_INTERNAL_ASSERT(false,
6018 "mutating a non-functional tensor with a functional tensor is not allowed.",
6019 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
6020 } else {
6021 // case 2: arguments are not functional tensors, so we no-op and redispatch.
6022 at::AutoDispatchSkipFunctionalize guard;
6023 at::Tensor tmp_output = at::_ops::log2_::call(self_);
6024 return self;
6025 }
6026 } else {
6027 at::Tensor tmp_output;
6028 {
6029 at::AutoDispatchSkipFunctionalize guard;
6030 tmp_output = at::_ops::log2::call(self_);
6031 }
6032 at::functionalization::impl::replace_(self, tmp_output);
6033 at::functionalization::impl::commit_update(self);
6034 at::functionalization::impl::sync(self);
6035 return self;
6036 }
6037 }
6038
6039 at::Tensor & logaddexp2_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
6040 if (false) {
6041 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
6042 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
6043 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
6044 auto self_meta = to_meta(self);
6045 auto other_meta = to_meta(other);
6046 auto out_meta = to_meta(out);
6047 at::AutoDispatchSkipFunctionalize func_guard;
6048 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
6049 at::_ops::logaddexp2_out::call(self_meta, other_meta, out_meta);
6050 }
6051
6052 at::Tensor self_;
6053 if (at::functionalization::impl::isFunctionalTensor(self)) {
6054 at::functionalization::impl::sync(self);
6055 self_ = at::functionalization::impl::from_functional_tensor(self);
6056 } else {
6057 self_ = self;
6058 }
6059
6060 at::Tensor other_;
6061 if (at::functionalization::impl::isFunctionalTensor(other)) {
6062 at::functionalization::impl::sync(other);
6063 other_ = at::functionalization::impl::from_functional_tensor(other);
6064 } else {
6065 other_ = other;
6066 }
6067
6068 at::Tensor out_;
6069 if (at::functionalization::impl::isFunctionalTensor(out)) {
6070 at::functionalization::impl::sync(out);
6071 out_ = at::functionalization::impl::from_functional_tensor(out);
6072 } else {
6073 out_ = out;
6074 }
6075 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
6076 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
6077 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
6078 TORCH_INTERNAL_ASSERT(false,
6079 "mutating a non-functional tensor with a functional tensor is not allowed.",
6080 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
6081 } else {
6082 // case 2: arguments are not functional tensors, so we no-op and redispatch.
6083 at::AutoDispatchSkipFunctionalize guard;
6084 at::Tensor tmp_output = at::_ops::logaddexp2_out::call(self_, other_, out_);
6085 return out;
6086 }
6087 } else {
6088 at::Tensor tmp_output;
6089 {
6090 at::AutoDispatchSkipFunctionalize guard;
6091 tmp_output = at::_ops::logaddexp2::call(self_, other_);
6092 }
6093 at::functionalization::impl::replace_(out, tmp_output);
6094 at::functionalization::impl::commit_update(out);
6095 at::functionalization::impl::sync(out);
6096 return out;
6097 }
6098 }
6099
6100 at::Tensor & xlogy_out_OutTensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
6101 if (false) {
6102 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
6103 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
6104 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
6105 auto self_meta = to_meta(self);
6106 auto other_meta = to_meta(other);
6107 auto out_meta = to_meta(out);
6108 at::AutoDispatchSkipFunctionalize func_guard;
6109 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
6110 at::_ops::xlogy_OutTensor::call(self_meta, other_meta, out_meta);
6111 }
6112
6113 at::Tensor self_;
6114 if (at::functionalization::impl::isFunctionalTensor(self)) {
6115 at::functionalization::impl::sync(self);
6116 self_ = at::functionalization::impl::from_functional_tensor(self);
6117 } else {
6118 self_ = self;
6119 }
6120
6121 at::Tensor other_;
6122 if (at::functionalization::impl::isFunctionalTensor(other)) {
6123 at::functionalization::impl::sync(other);
6124 other_ = at::functionalization::impl::from_functional_tensor(other);
6125 } else {
6126 other_ = other;
6127 }
6128
6129 at::Tensor out_;
6130 if (at::functionalization::impl::isFunctionalTensor(out)) {
6131 at::functionalization::impl::sync(out);
6132 out_ = at::functionalization::impl::from_functional_tensor(out);
6133 } else {
6134 out_ = out;
6135 }
6136 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
6137 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
6138 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
6139 TORCH_INTERNAL_ASSERT(false,
6140 "mutating a non-functional tensor with a functional tensor is not allowed.",
6141 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
6142 } else {
6143 // case 2: arguments are not functional tensors, so we no-op and redispatch.
6144 at::AutoDispatchSkipFunctionalize guard;
6145 at::Tensor tmp_output = at::_ops::xlogy_OutTensor::call(self_, other_, out_);
6146 return out;
6147 }
6148 } else {
6149 at::Tensor tmp_output;
6150 {
6151 at::AutoDispatchSkipFunctionalize guard;
6152 tmp_output = at::_ops::xlogy_Tensor::call(self_, other_);
6153 }
6154 at::functionalization::impl::replace_(out, tmp_output);
6155 at::functionalization::impl::commit_update(out);
6156 at::functionalization::impl::sync(out);
6157 return out;
6158 }
6159 }
6160
6161 at::Tensor & xlogy__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
6162 if (true) {
6163 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
6164 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
6165 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
6166 auto self_meta = to_meta(self);
6167 auto other_meta = to_meta(other);
6168 at::AutoDispatchSkipFunctionalize func_guard;
6169 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
6170 at::_ops::xlogy__Tensor::call(self_meta, other_meta);
6171 }
6172
6173 at::Tensor self_;
6174 if (at::functionalization::impl::isFunctionalTensor(self)) {
6175 at::functionalization::impl::sync(self);
6176 self_ = at::functionalization::impl::from_functional_tensor(self);
6177 } else {
6178 self_ = self;
6179 }
6180
6181 at::Tensor other_;
6182 if (at::functionalization::impl::isFunctionalTensor(other)) {
6183 at::functionalization::impl::sync(other);
6184 other_ = at::functionalization::impl::from_functional_tensor(other);
6185 } else {
6186 other_ = other;
6187 }
6188 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
6189 if ((false || at::functionalization::impl::isFunctionalTensor(other))) {
6190 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
6191 TORCH_INTERNAL_ASSERT(false,
6192 "mutating a non-functional tensor with a functional tensor is not allowed.",
6193 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
6194 } else {
6195 // case 2: arguments are not functional tensors, so we no-op and redispatch.
6196 at::AutoDispatchSkipFunctionalize guard;
6197 at::Tensor tmp_output = at::_ops::xlogy__Tensor::call(self_, other_);
6198 return self;
6199 }
6200 } else {
6201 at::Tensor tmp_output;
6202 {
6203 at::AutoDispatchSkipFunctionalize guard;
6204 tmp_output = at::_ops::xlogy_Tensor::call(self_, other_);
6205 }
6206 at::functionalization::impl::replace_(self, tmp_output);
6207 at::functionalization::impl::commit_update(self);
6208 at::functionalization::impl::sync(self);
6209 return self;
6210 }
6211 }
6212
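// Editorial note (illustrative sketch, not produced by torchgen): the `if (true)` pre-flight in
// xlogy__Tensor above replays the in-place call on meta tensors so that shape/broadcast errors
// still surface once the op is rewritten to its functional form. A minimal sketch, assuming
// ordinary ATen tensors:
//
//   at::Tensor a = at::rand({2, 3});
//   at::Tensor b = at::rand({4});   // not broadcastable against `a` for an in-place update
//   a.xlogy_(b);                    // the meta replay reports the same shape error the
//                                   // eager in-place kernel would report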
6213 at::Tensor & xlogy_out_OutScalar_Self(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
6214 if (false) {
6215 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
6216 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
6218 auto other_meta = to_meta(other);
6219 auto out_meta = to_meta(out);
6220 at::AutoDispatchSkipFunctionalize func_guard;
6221 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
6222 at::_ops::xlogy_OutScalar_Self::call(self, other_meta, out_meta);
6223 }
6224
6225 at::Tensor other_;
6226 if (at::functionalization::impl::isFunctionalTensor(other)) {
6227 at::functionalization::impl::sync(other);
6228 other_ = at::functionalization::impl::from_functional_tensor(other);
6229 } else {
6230 other_ = other;
6231 }
6232
6233 at::Tensor out_;
6234 if (at::functionalization::impl::isFunctionalTensor(out)) {
6235 at::functionalization::impl::sync(out);
6236 out_ = at::functionalization::impl::from_functional_tensor(out);
6237 } else {
6238 out_ = out;
6239 }
6240 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
6241 if ((false || at::functionalization::impl::isFunctionalTensor(other))) {
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
6243 TORCH_INTERNAL_ASSERT(false,
6244 "mutating a non-functional tensor with a functional tensor is not allowed.",
6245 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
6246 } else {
6247 // case 2: arguments are not functional tensors, so we no-op and redispatch.
6248 at::AutoDispatchSkipFunctionalize guard;
6249 at::Tensor tmp_output = at::_ops::xlogy_OutScalar_Self::call(self, other_, out_);
return out;
6251 }
6252 } else {
6253 at::Tensor tmp_output;
6254 {
6255 at::AutoDispatchSkipFunctionalize guard;
6256 tmp_output = at::_ops::xlogy_Scalar_Self::call(self, other_);
6257 }
6258 at::functionalization::impl::replace_(out, tmp_output);
6259 at::functionalization::impl::commit_update(out);
6260 at::functionalization::impl::sync(out);
6261 return out;
6262 }
6263 }
6264
6265 at::Tensor & xlogy_out_OutScalar_Other(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
6266 if (false) {
6267 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
6268 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
6270 auto self_meta = to_meta(self);
6271 auto out_meta = to_meta(out);
6272 at::AutoDispatchSkipFunctionalize func_guard;
6273 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
6274 at::_ops::xlogy_OutScalar_Other::call(self_meta, other, out_meta);
6275 }
6276
6277 at::Tensor self_;
6278 if (at::functionalization::impl::isFunctionalTensor(self)) {
6279 at::functionalization::impl::sync(self);
6280 self_ = at::functionalization::impl::from_functional_tensor(self);
6281 } else {
6282 self_ = self;
6283 }
6284
6285 at::Tensor out_;
6286 if (at::functionalization::impl::isFunctionalTensor(out)) {
6287 at::functionalization::impl::sync(out);
6288 out_ = at::functionalization::impl::from_functional_tensor(out);
6289 } else {
6290 out_ = out;
6291 }
6292 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
6293 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
6295 TORCH_INTERNAL_ASSERT(false,
6296 "mutating a non-functional tensor with a functional tensor is not allowed.",
6297 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
6298 } else {
6299 // case 2: arguments are not functional tensors, so we no-op and redispatch.
6300 at::AutoDispatchSkipFunctionalize guard;
6301 at::Tensor tmp_output = at::_ops::xlogy_OutScalar_Other::call(self_, other, out_);
return out;
6303 }
6304 } else {
6305 at::Tensor tmp_output;
6306 {
6307 at::AutoDispatchSkipFunctionalize guard;
6308 tmp_output = at::_ops::xlogy_Scalar_Other::call(self_, other);
6309 }
6310 at::functionalization::impl::replace_(out, tmp_output);
6311 at::functionalization::impl::commit_update(out);
6312 at::functionalization::impl::sync(out);
6313 return out;
6314 }
6315 }
6316
6317 at::Tensor & xlogy__Scalar_Other(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
6318 if (true) {
6319 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
6320 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
6322 auto self_meta = to_meta(self);
6323 at::AutoDispatchSkipFunctionalize func_guard;
6324 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
6325 at::_ops::xlogy__Scalar_Other::call(self_meta, other);
6326 }
6327
6328 at::Tensor self_;
6329 if (at::functionalization::impl::isFunctionalTensor(self)) {
6330 at::functionalization::impl::sync(self);
6331 self_ = at::functionalization::impl::from_functional_tensor(self);
6332 } else {
6333 self_ = self;
6334 }
6335 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
6336 if ((false)) {
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
6338 TORCH_INTERNAL_ASSERT(false,
6339 "mutating a non-functional tensor with a functional tensor is not allowed.",
6340 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
6341 } else {
6342 // case 2: arguments are not functional tensors, so we no-op and redispatch.
6343 at::AutoDispatchSkipFunctionalize guard;
6344 at::Tensor tmp_output = at::_ops::xlogy__Scalar_Other::call(self_, other);
return self;
6346 }
6347 } else {
6348 at::Tensor tmp_output;
6349 {
6350 at::AutoDispatchSkipFunctionalize guard;
6351 tmp_output = at::_ops::xlogy_Scalar_Other::call(self_, other);
6352 }
6353 at::functionalization::impl::replace_(self, tmp_output);
6354 at::functionalization::impl::commit_update(self);
6355 at::functionalization::impl::sync(self);
6356 return self;
6357 }
6358 }
6359
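// Editorial note (illustrative sketch, not produced by torchgen): each mutable xlogy schema above
// is lowered to the functional variant visible in its redispatch, i.e.
//   xlogy.OutTensor / xlogy_.Tensor             -> xlogy.Tensor
//   xlogy.OutScalar_Self                        -> xlogy.Scalar_Self
//   xlogy.OutScalar_Other / xlogy_.Scalar_Other -> xlogy.Scalar_Other
// and the result is written back through replace_/commit_update/sync. A hedged usage sketch,
// assuming the generated at::xlogy_outf wrapper is available:
//
//   at::Tensor x = at::rand({4}), y = at::rand({4});
//   at::Tensor out = at::empty({4});
//   at::xlogy_outf(x, y, out);  // under functionalization this reaches xlogy_out_OutTensor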
6360 at::Tensor & logspace_out_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out) {
6361 if (false) {
6362 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
6363 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
6365 auto out_meta = to_meta(out);
6366 at::AutoDispatchSkipFunctionalize func_guard;
6367 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
6368 at::_ops::logspace_out::call(start, end, steps, base, out_meta);
6369 }
6370
6371 at::Tensor out_;
6372 if (at::functionalization::impl::isFunctionalTensor(out)) {
6373 at::functionalization::impl::sync(out);
6374 out_ = at::functionalization::impl::from_functional_tensor(out);
6375 } else {
6376 out_ = out;
6377 }
6378 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
6379 if ((false)) {
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
6381 TORCH_INTERNAL_ASSERT(false,
6382 "mutating a non-functional tensor with a functional tensor is not allowed.",
6383 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
6384 } else {
6385 // case 2: arguments are not functional tensors, so we no-op and redispatch.
6386 at::AutoDispatchSkipFunctionalize guard;
6387 at::Tensor tmp_output = at::_ops::logspace_out::call(start, end, steps, base, out_);
return out;
6389 }
6390 } else {
6391 at::Tensor tmp_output;
6392 {
6393 at::AutoDispatchSkipFunctionalize guard;
6394 tmp_output = at::_ops::logspace::call(start, end, steps, base, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt);
6395 }
6396 at::functionalization::impl::replace_(out, tmp_output);
6397 at::functionalization::impl::commit_update(out);
6398 at::functionalization::impl::sync(out);
6399 return out;
6400 }
6401 }
6402
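// Editorial note (illustrative sketch, not produced by torchgen): logspace.out has no tensor
// inputs, so the functional redispatch above rebuilds the factory call from the unwrapped `out_`
// (its dtype, layout and device) and passes c10::nullopt for pin_memory. A hedged sketch,
// assuming the generated at::logspace_outf wrapper:
//
//   at::Tensor out = at::empty({0}, at::kDouble);
//   at::logspace_outf(/*start=*/0.0, /*end=*/3.0, /*steps=*/4, /*base=*/10.0, out);
//   // functionalized: computes at::logspace(...) with out's options and commits it back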
6403 at::Tensor & matrix_power_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n, at::Tensor & out) {
6404 if (false) {
6405 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
6406 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
6408 auto self_meta = to_meta(self);
6409 auto out_meta = to_meta(out);
6410 at::AutoDispatchSkipFunctionalize func_guard;
6411 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
6412 at::_ops::matrix_power_out::call(self_meta, n, out_meta);
6413 }
6414
6415 at::Tensor self_;
6416 if (at::functionalization::impl::isFunctionalTensor(self)) {
6417 at::functionalization::impl::sync(self);
6418 self_ = at::functionalization::impl::from_functional_tensor(self);
6419 } else {
6420 self_ = self;
6421 }
6422
6423 at::Tensor out_;
6424 if (at::functionalization::impl::isFunctionalTensor(out)) {
6425 at::functionalization::impl::sync(out);
6426 out_ = at::functionalization::impl::from_functional_tensor(out);
6427 } else {
6428 out_ = out;
6429 }
6430 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
6431 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
6433 TORCH_INTERNAL_ASSERT(false,
6434 "mutating a non-functional tensor with a functional tensor is not allowed.",
6435 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
6436 } else {
6437 // case 2: arguments are not functional tensors, so we no-op and redispatch.
6438 at::AutoDispatchSkipFunctionalize guard;
6439 at::Tensor tmp_output = at::_ops::matrix_power_out::call(self_, n, out_);
return out;
6441 }
6442 } else {
6443 at::Tensor tmp_output;
6444 {
6445 at::AutoDispatchSkipFunctionalize guard;
6446 tmp_output = at::_ops::matrix_power::call(self_, n);
6447 }
6448 at::functionalization::impl::replace_(out, tmp_output);
6449 at::functionalization::impl::commit_update(out);
6450 at::functionalization::impl::sync(out);
6451 return out;
6452 }
6453 }
6454
6455 ::std::tuple<at::Tensor &,at::Tensor &> _aminmax_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out0, at::Tensor & out1) {
6456 if (false) {
6457 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
6458 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
6460 auto self_meta = to_meta(self);
6461 auto out0_meta = to_meta(out0);
6462 auto out1_meta = to_meta(out1);
6463 at::AutoDispatchSkipFunctionalize func_guard;
6464 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
6465 at::_ops::_aminmax_out::call(self_meta, out0_meta, out1_meta);
6466 }
6467
6468 at::Tensor self_;
6469 if (at::functionalization::impl::isFunctionalTensor(self)) {
6470 at::functionalization::impl::sync(self);
6471 self_ = at::functionalization::impl::from_functional_tensor(self);
6472 } else {
6473 self_ = self;
6474 }
6475
6476 at::Tensor out0_;
6477 if (at::functionalization::impl::isFunctionalTensor(out0)) {
6478 at::functionalization::impl::sync(out0);
6479 out0_ = at::functionalization::impl::from_functional_tensor(out0);
6480 } else {
6481 out0_ = out0;
6482 }
6483
6484 at::Tensor out1_;
6485 if (at::functionalization::impl::isFunctionalTensor(out1)) {
6486 at::functionalization::impl::sync(out1);
6487 out1_ = at::functionalization::impl::from_functional_tensor(out1);
6488 } else {
6489 out1_ = out1;
6490 }
6491 if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
6492 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
6494 TORCH_INTERNAL_ASSERT(false,
6495 "mutating a non-functional tensor with a functional tensor is not allowed.",
6496 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
6497 } else {
6498 // case 2: arguments are not functional tensors, so we no-op and redispatch.
6499 at::AutoDispatchSkipFunctionalize guard;
6500 ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_aminmax_out::call(self_, out0_, out1_);
return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
6502 }
6503 } else {
6504 ::std::tuple<at::Tensor,at::Tensor> tmp_output;
6505 {
6506 at::AutoDispatchSkipFunctionalize guard;
6507 tmp_output = at::_ops::_aminmax::call(self_);
6508 }
6509 at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
6510 at::functionalization::impl::commit_update(out0);
6511 at::functionalization::impl::sync(out0);
6512 at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
6513 at::functionalization::impl::commit_update(out1);
6514 at::functionalization::impl::sync(out1);
6515 return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
6516 }
6517 }
6518
6519 ::std::tuple<at::Tensor &,at::Tensor &> _aminmax_out_dim_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out0, at::Tensor & out1) {
6520 if (false) {
6521 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
6522 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
6524 auto self_meta = to_meta(self);
6525 auto out0_meta = to_meta(out0);
6526 auto out1_meta = to_meta(out1);
6527 at::AutoDispatchSkipFunctionalize func_guard;
6528 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
6529 at::_ops::_aminmax_dim_out::call(self_meta, dim, keepdim, out0_meta, out1_meta);
6530 }
6531
6532 at::Tensor self_;
6533 if (at::functionalization::impl::isFunctionalTensor(self)) {
6534 at::functionalization::impl::sync(self);
6535 self_ = at::functionalization::impl::from_functional_tensor(self);
6536 } else {
6537 self_ = self;
6538 }
6539
6540 at::Tensor out0_;
6541 if (at::functionalization::impl::isFunctionalTensor(out0)) {
6542 at::functionalization::impl::sync(out0);
6543 out0_ = at::functionalization::impl::from_functional_tensor(out0);
6544 } else {
6545 out0_ = out0;
6546 }
6547
6548 at::Tensor out1_;
6549 if (at::functionalization::impl::isFunctionalTensor(out1)) {
6550 at::functionalization::impl::sync(out1);
6551 out1_ = at::functionalization::impl::from_functional_tensor(out1);
6552 } else {
6553 out1_ = out1;
6554 }
6555 if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
6556 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
6558 TORCH_INTERNAL_ASSERT(false,
6559 "mutating a non-functional tensor with a functional tensor is not allowed.",
6560 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
6561 } else {
6562 // case 2: arguments are not functional tensors, so we no-op and redispatch.
6563 at::AutoDispatchSkipFunctionalize guard;
6564 ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_aminmax_dim_out::call(self_, dim, keepdim, out0_, out1_);
return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
6566 }
6567 } else {
6568 ::std::tuple<at::Tensor,at::Tensor> tmp_output;
6569 {
6570 at::AutoDispatchSkipFunctionalize guard;
6571 tmp_output = at::_ops::_aminmax_dim::call(self_, dim, keepdim);
6572 }
6573 at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
6574 at::functionalization::impl::commit_update(out0);
6575 at::functionalization::impl::sync(out0);
6576 at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
6577 at::functionalization::impl::commit_update(out1);
6578 at::functionalization::impl::sync(out1);
6579 return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
6580 }
6581 }
6582
6583 ::std::tuple<at::Tensor &,at::Tensor &> aminmax_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, at::Tensor & min, at::Tensor & max) {
6584 if (false) {
6585 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
6586 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
6588 auto self_meta = to_meta(self);
6589 auto min_meta = to_meta(min);
6590 auto max_meta = to_meta(max);
6591 at::AutoDispatchSkipFunctionalize func_guard;
6592 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
6593 at::_ops::aminmax_out::call(self_meta, dim, keepdim, min_meta, max_meta);
6594 }
6595
6596 at::Tensor self_;
6597 if (at::functionalization::impl::isFunctionalTensor(self)) {
6598 at::functionalization::impl::sync(self);
6599 self_ = at::functionalization::impl::from_functional_tensor(self);
6600 } else {
6601 self_ = self;
6602 }
6603
6604 at::Tensor min_;
6605 if (at::functionalization::impl::isFunctionalTensor(min)) {
6606 at::functionalization::impl::sync(min);
6607 min_ = at::functionalization::impl::from_functional_tensor(min);
6608 } else {
6609 min_ = min;
6610 }
6611
6612 at::Tensor max_;
6613 if (at::functionalization::impl::isFunctionalTensor(max)) {
6614 at::functionalization::impl::sync(max);
6615 max_ = at::functionalization::impl::from_functional_tensor(max);
6616 } else {
6617 max_ = max;
6618 }
6619 if (!(true && at::functionalization::impl::isFunctionalTensor(min) && at::functionalization::impl::isFunctionalTensor(max))) {
6620 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
6622 TORCH_INTERNAL_ASSERT(false,
6623 "mutating a non-functional tensor with a functional tensor is not allowed.",
6624 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
6625 } else {
6626 // case 2: arguments are not functional tensors, so we no-op and redispatch.
6627 at::AutoDispatchSkipFunctionalize guard;
6628 ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::aminmax_out::call(self_, dim, keepdim, min_, max_);
return ::std::tuple<at::Tensor &,at::Tensor &>(min, max);
6630 }
6631 } else {
6632 ::std::tuple<at::Tensor,at::Tensor> tmp_output;
6633 {
6634 at::AutoDispatchSkipFunctionalize guard;
6635 tmp_output = at::_ops::aminmax::call(self_, dim, keepdim);
6636 }
6637 at::functionalization::impl::replace_(min, std::get<0>(tmp_output));
6638 at::functionalization::impl::commit_update(min);
6639 at::functionalization::impl::sync(min);
6640 at::functionalization::impl::replace_(max, std::get<1>(tmp_output));
6641 at::functionalization::impl::commit_update(max);
6642 at::functionalization::impl::sync(max);
6643 return ::std::tuple<at::Tensor &,at::Tensor &>(min, max);
6644 }
6645 }
6646
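// Editorial note (illustrative sketch, not produced by torchgen): the multi-output kernels above
// (_aminmax.out, _aminmax.dim_out, aminmax.out) call the functional op once, then commit each
// tuple element back into its matching out tensor before returning the original references.
// A hedged sketch, assuming the generated at::aminmax_outf wrapper:
//
//   at::Tensor t = at::rand({3, 5});
//   at::Tensor mn = at::empty({0}), mx = at::empty({0});
//   at::aminmax_outf(t, /*dim=*/1, /*keepdim=*/false, mn, mx);
//   // functionalized: runs at::aminmax(t, 1, false) and writes the pair into mn / mx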
6647 at::Tensor & _compute_linear_combination_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & coefficients, at::Tensor & out) {
6648 if (false) {
6649 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
6650 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
6652 auto input_meta = to_meta(input);
6653 auto coefficients_meta = to_meta(coefficients);
6654 auto out_meta = to_meta(out);
6655 at::AutoDispatchSkipFunctionalize func_guard;
6656 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
6657 at::_ops::_compute_linear_combination_out::call(input_meta, coefficients_meta, out_meta);
6658 }
6659
6660 at::Tensor input_;
6661 if (at::functionalization::impl::isFunctionalTensor(input)) {
6662 at::functionalization::impl::sync(input);
6663 input_ = at::functionalization::impl::from_functional_tensor(input);
6664 } else {
6665 input_ = input;
6666 }
6667
6668 at::Tensor coefficients_;
6669 if (at::functionalization::impl::isFunctionalTensor(coefficients)) {
6670 at::functionalization::impl::sync(coefficients);
6671 coefficients_ = at::functionalization::impl::from_functional_tensor(coefficients);
6672 } else {
6673 coefficients_ = coefficients;
6674 }
6675
6676 at::Tensor out_;
6677 if (at::functionalization::impl::isFunctionalTensor(out)) {
6678 at::functionalization::impl::sync(out);
6679 out_ = at::functionalization::impl::from_functional_tensor(out);
6680 } else {
6681 out_ = out;
6682 }
6683 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
6684 if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(coefficients))) {
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
6686 TORCH_INTERNAL_ASSERT(false,
6687 "mutating a non-functional tensor with a functional tensor is not allowed.",
6688 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
6689 } else {
6690 // case 2: arguments are not functional tensors, so we no-op and redispatch.
6691 at::AutoDispatchSkipFunctionalize guard;
6692 at::Tensor tmp_output = at::_ops::_compute_linear_combination_out::call(input_, coefficients_, out_);
return out;
6694 }
6695 } else {
6696 at::Tensor tmp_output;
6697 {
6698 at::AutoDispatchSkipFunctionalize guard;
6699 tmp_output = at::_ops::_compute_linear_combination::call(input_, coefficients_);
6700 }
6701 at::functionalization::impl::replace_(out, tmp_output);
6702 at::functionalization::impl::commit_update(out);
6703 at::functionalization::impl::sync(out);
6704 return out;
6705 }
6706 }
6707
6708 at::Tensor & _mps_max_pool2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
6709 if (false) {
6710 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
6711 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
6713 auto self_meta = to_meta(self);
6714 auto out_meta = to_meta(out);
6715 at::AutoDispatchSkipFunctionalize func_guard;
6716 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
6717 at::_ops::_mps_max_pool2d_out::call(self_meta, kernel_size, stride, padding, dilation, ceil_mode, out_meta);
6718 }
6719
6720 at::Tensor self_;
6721 if (at::functionalization::impl::isFunctionalTensor(self)) {
6722 at::functionalization::impl::sync(self);
6723 self_ = at::functionalization::impl::from_functional_tensor(self);
6724 } else {
6725 self_ = self;
6726 }
6727
6728 at::Tensor out_;
6729 if (at::functionalization::impl::isFunctionalTensor(out)) {
6730 at::functionalization::impl::sync(out);
6731 out_ = at::functionalization::impl::from_functional_tensor(out);
6732 } else {
6733 out_ = out;
6734 }
6735 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
6736 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
6738 TORCH_INTERNAL_ASSERT(false,
6739 "mutating a non-functional tensor with a functional tensor is not allowed.",
6740 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
6741 } else {
6742 // case 2: arguments are not functional tensors, so we no-op and redispatch.
6743 at::AutoDispatchSkipFunctionalize guard;
6744 at::Tensor tmp_output = at::_ops::_mps_max_pool2d_out::call(self_, kernel_size, stride, padding, dilation, ceil_mode, out_);
return out;
6746 }
6747 } else {
6748 at::Tensor tmp_output;
6749 {
6750 at::AutoDispatchSkipFunctionalize guard;
6751 tmp_output = at::_ops::_mps_max_pool2d::call(self_, kernel_size, stride, padding, dilation, ceil_mode);
6752 }
6753 at::functionalization::impl::replace_(out, tmp_output);
6754 at::functionalization::impl::commit_update(out);
6755 at::functionalization::impl::sync(out);
6756 return out;
6757 }
6758 }
6759
6760 at::Tensor & mkldnn_max_pool3d_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
6761 if (false) {
6762 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
6763 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
6765 auto grad_output_meta = to_meta(grad_output);
6766 auto output_meta = to_meta(output);
6767 auto input_meta = to_meta(input);
6768 auto out_meta = to_meta(out);
6769 at::AutoDispatchSkipFunctionalize func_guard;
6770 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
6771 at::_ops::mkldnn_max_pool3d_backward_out::call(grad_output_meta, output_meta, input_meta, kernel_size, stride, padding, dilation, ceil_mode, out_meta);
6772 }
6773
6774 at::Tensor grad_output_;
6775 if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
6776 at::functionalization::impl::sync(grad_output);
6777 grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
6778 } else {
6779 grad_output_ = grad_output;
6780 }
6781
6782 at::Tensor output_;
6783 if (at::functionalization::impl::isFunctionalTensor(output)) {
6784 at::functionalization::impl::sync(output);
6785 output_ = at::functionalization::impl::from_functional_tensor(output);
6786 } else {
6787 output_ = output;
6788 }
6789
6790 at::Tensor input_;
6791 if (at::functionalization::impl::isFunctionalTensor(input)) {
6792 at::functionalization::impl::sync(input);
6793 input_ = at::functionalization::impl::from_functional_tensor(input);
6794 } else {
6795 input_ = input;
6796 }
6797
6798 at::Tensor out_;
6799 if (at::functionalization::impl::isFunctionalTensor(out)) {
6800 at::functionalization::impl::sync(out);
6801 out_ = at::functionalization::impl::from_functional_tensor(out);
6802 } else {
6803 out_ = out;
6804 }
6805 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
6806 if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(output) || at::functionalization::impl::isFunctionalTensor(input))) {
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
6808 TORCH_INTERNAL_ASSERT(false,
6809 "mutating a non-functional tensor with a functional tensor is not allowed.",
6810 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
6811 } else {
6812 // case 2: arguments are not functional tensors, so we no-op and redispatch.
6813 at::AutoDispatchSkipFunctionalize guard;
6814 at::Tensor tmp_output = at::_ops::mkldnn_max_pool3d_backward_out::call(grad_output_, output_, input_, kernel_size, stride, padding, dilation, ceil_mode, out_);
return out;
6816 }
6817 } else {
6818 at::Tensor tmp_output;
6819 {
6820 at::AutoDispatchSkipFunctionalize guard;
6821 tmp_output = at::_ops::mkldnn_max_pool3d_backward::call(grad_output_, output_, input_, kernel_size, stride, padding, dilation, ceil_mode);
6822 }
6823 at::functionalization::impl::replace_(out, tmp_output);
6824 at::functionalization::impl::commit_update(out);
6825 at::functionalization::impl::sync(out);
6826 return out;
6827 }
6828 }
6829
6830 ::std::tuple<at::Tensor &,at::Tensor &> min_out_dim_min(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices) {
6831 if (false) {
6832 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
6833 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
6835 auto self_meta = to_meta(self);
6836 auto min_meta = to_meta(min);
6837 auto min_indices_meta = to_meta(min_indices);
6838 at::AutoDispatchSkipFunctionalize func_guard;
6839 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
6840 at::_ops::min_dim_min::call(self_meta, dim, keepdim, min_meta, min_indices_meta);
6841 }
6842
6843 at::Tensor self_;
6844 if (at::functionalization::impl::isFunctionalTensor(self)) {
6845 at::functionalization::impl::sync(self);
6846 self_ = at::functionalization::impl::from_functional_tensor(self);
6847 } else {
6848 self_ = self;
6849 }
6850
6851 at::Tensor min_;
6852 if (at::functionalization::impl::isFunctionalTensor(min)) {
6853 at::functionalization::impl::sync(min);
6854 min_ = at::functionalization::impl::from_functional_tensor(min);
6855 } else {
6856 min_ = min;
6857 }
6858
6859 at::Tensor min_indices_;
6860 if (at::functionalization::impl::isFunctionalTensor(min_indices)) {
6861 at::functionalization::impl::sync(min_indices);
6862 min_indices_ = at::functionalization::impl::from_functional_tensor(min_indices);
6863 } else {
6864 min_indices_ = min_indices;
6865 }
6866 if (!(true && at::functionalization::impl::isFunctionalTensor(min) && at::functionalization::impl::isFunctionalTensor(min_indices))) {
6867 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
6869 TORCH_INTERNAL_ASSERT(false,
6870 "mutating a non-functional tensor with a functional tensor is not allowed.",
6871 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
6872 } else {
6873 // case 2: arguments are not functional tensors, so we no-op and redispatch.
6874 at::AutoDispatchSkipFunctionalize guard;
6875 ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::min_dim_min::call(self_, dim, keepdim, min_, min_indices_);
return ::std::tuple<at::Tensor &,at::Tensor &>(min, min_indices);
6877 }
6878 } else {
6879 ::std::tuple<at::Tensor,at::Tensor> tmp_output;
6880 {
6881 at::AutoDispatchSkipFunctionalize guard;
6882 tmp_output = at::_ops::min_dim::call(self_, dim, keepdim);
6883 }
6884 at::functionalization::impl::replace_(min, std::get<0>(tmp_output));
6885 at::functionalization::impl::commit_update(min);
6886 at::functionalization::impl::sync(min);
6887 at::functionalization::impl::replace_(min_indices, std::get<1>(tmp_output));
6888 at::functionalization::impl::commit_update(min_indices);
6889 at::functionalization::impl::sync(min_indices);
6890 return ::std::tuple<at::Tensor &,at::Tensor &>(min, min_indices);
6891 }
6892 }
6893
6894 ::std::tuple<at::Tensor &,at::Tensor &> min_out_names_dim_min(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices) {
6895 if (false) {
6896 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
6897 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
6899 auto self_meta = to_meta(self);
6900 auto min_meta = to_meta(min);
6901 auto min_indices_meta = to_meta(min_indices);
6902 at::AutoDispatchSkipFunctionalize func_guard;
6903 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
6904 at::_ops::min_names_dim_min::call(self_meta, dim, keepdim, min_meta, min_indices_meta);
6905 }
6906
6907 at::Tensor self_;
6908 if (at::functionalization::impl::isFunctionalTensor(self)) {
6909 at::functionalization::impl::sync(self);
6910 self_ = at::functionalization::impl::from_functional_tensor(self);
6911 } else {
6912 self_ = self;
6913 }
6914
6915 at::Tensor min_;
6916 if (at::functionalization::impl::isFunctionalTensor(min)) {
6917 at::functionalization::impl::sync(min);
6918 min_ = at::functionalization::impl::from_functional_tensor(min);
6919 } else {
6920 min_ = min;
6921 }
6922
6923 at::Tensor min_indices_;
6924 if (at::functionalization::impl::isFunctionalTensor(min_indices)) {
6925 at::functionalization::impl::sync(min_indices);
6926 min_indices_ = at::functionalization::impl::from_functional_tensor(min_indices);
6927 } else {
6928 min_indices_ = min_indices;
6929 }
6930 if (!(true && at::functionalization::impl::isFunctionalTensor(min) && at::functionalization::impl::isFunctionalTensor(min_indices))) {
6931 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
6933 TORCH_INTERNAL_ASSERT(false,
6934 "mutating a non-functional tensor with a functional tensor is not allowed.",
6935 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
6936 } else {
6937 // case 2: arguments are not functional tensors, so we no-op and redispatch.
6938 at::AutoDispatchSkipFunctionalize guard;
6939 ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::min_names_dim_min::call(self_, dim, keepdim, min_, min_indices_);
return ::std::tuple<at::Tensor &,at::Tensor &>(min, min_indices);
6941 }
6942 } else {
6943 ::std::tuple<at::Tensor,at::Tensor> tmp_output;
6944 {
6945 at::AutoDispatchSkipFunctionalize guard;
6946 tmp_output = at::_ops::min_names_dim::call(self_, dim, keepdim);
6947 }
6948 at::functionalization::impl::replace_(min, std::get<0>(tmp_output));
6949 at::functionalization::impl::commit_update(min);
6950 at::functionalization::impl::sync(min);
6951 at::functionalization::impl::replace_(min_indices, std::get<1>(tmp_output));
6952 at::functionalization::impl::commit_update(min_indices);
6953 at::functionalization::impl::sync(min_indices);
6954 return ::std::tuple<at::Tensor &,at::Tensor &>(min, min_indices);
6955 }
6956 }
6957
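// Editorial note (illustrative sketch, not produced by torchgen): min.dim_min and its named-dim
// twin min.names_dim_min follow the same pattern, lowering to min.dim / min.names_dim
// respectively; only the dim argument type (int64_t vs at::Dimname) differs. A hedged sketch,
// assuming the generated at::min_outf wrapper:
//
//   at::Tensor t = at::rand({3, 5});
//   at::Tensor vals = at::empty({0}), idxs = at::empty({0}, at::kLong);
//   at::min_outf(t, /*dim=*/1, /*keepdim=*/false, vals, idxs);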
6958 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mps_convolution_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
6959 if (false) {
6960 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
6961 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
6963 auto self_meta = to_meta(self);
6964 auto grad_output_meta = to_meta(grad_output);
6965 auto weight_meta = to_meta(weight);
6966 auto out0_meta = to_meta(out0);
6967 auto out1_meta = to_meta(out1);
6968 auto out2_meta = to_meta(out2);
6969 at::AutoDispatchSkipFunctionalize func_guard;
6970 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
6971 at::_ops::mps_convolution_backward_out::call(self_meta, grad_output_meta, weight_meta, padding, stride, dilation, groups, output_mask, out0_meta, out1_meta, out2_meta);
6972 }
6973
6974 at::Tensor self_;
6975 if (at::functionalization::impl::isFunctionalTensor(self)) {
6976 at::functionalization::impl::sync(self);
6977 self_ = at::functionalization::impl::from_functional_tensor(self);
6978 } else {
6979 self_ = self;
6980 }
6981
6982 at::Tensor grad_output_;
6983 if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
6984 at::functionalization::impl::sync(grad_output);
6985 grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
6986 } else {
6987 grad_output_ = grad_output;
6988 }
6989
6990 at::Tensor weight_;
6991 if (at::functionalization::impl::isFunctionalTensor(weight)) {
6992 at::functionalization::impl::sync(weight);
6993 weight_ = at::functionalization::impl::from_functional_tensor(weight);
6994 } else {
6995 weight_ = weight;
6996 }
6997
6998 at::Tensor out0_;
6999 if (at::functionalization::impl::isFunctionalTensor(out0)) {
7000 at::functionalization::impl::sync(out0);
7001 out0_ = at::functionalization::impl::from_functional_tensor(out0);
7002 } else {
7003 out0_ = out0;
7004 }
7005
7006 at::Tensor out1_;
7007 if (at::functionalization::impl::isFunctionalTensor(out1)) {
7008 at::functionalization::impl::sync(out1);
7009 out1_ = at::functionalization::impl::from_functional_tensor(out1);
7010 } else {
7011 out1_ = out1;
7012 }
7013
7014 at::Tensor out2_;
7015 if (at::functionalization::impl::isFunctionalTensor(out2)) {
7016 at::functionalization::impl::sync(out2);
7017 out2_ = at::functionalization::impl::from_functional_tensor(out2);
7018 } else {
7019 out2_ = out2;
7020 }
7021 if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
7022 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(weight))) {
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
7024 TORCH_INTERNAL_ASSERT(false,
7025 "mutating a non-functional tensor with a functional tensor is not allowed.",
7026 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
7027 } else {
7028 // case 2: arguments are not functional tensors, so we no-op and redispatch.
7029 at::AutoDispatchSkipFunctionalize guard;
7030 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::mps_convolution_backward_out::call(self_, grad_output_, weight_, padding, stride, dilation, groups, output_mask, out0_, out1_, out2_);
return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
7032 }
7033 } else {
7034 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
7035 {
7036 at::AutoDispatchSkipFunctionalize guard;
7037 tmp_output = at::_ops::mps_convolution_backward::call(self_, grad_output_, weight_, padding, stride, dilation, groups, output_mask);
7038 }
7039 at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
7040 at::functionalization::impl::commit_update(out0);
7041 at::functionalization::impl::sync(out0);
7042 at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
7043 at::functionalization::impl::commit_update(out1);
7044 at::functionalization::impl::sync(out1);
7045 at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
7046 at::functionalization::impl::commit_update(out2);
7047 at::functionalization::impl::sync(out2);
7048 return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
7049 }
7050 }
7051
7052 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> miopen_rnn_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
7053 if (false) {
7054 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
7055 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
7057 auto input_meta = to_meta(input);
7058 auto weight_meta = to_meta(weight);
7059 auto hx_meta = to_meta(hx);
7060 auto cx_meta = to_meta(cx);
7061 auto dropout_state_meta = to_meta(dropout_state);
7062 auto out0_meta = to_meta(out0);
7063 auto out1_meta = to_meta(out1);
7064 auto out2_meta = to_meta(out2);
7065 auto out3_meta = to_meta(out3);
7066 auto out4_meta = to_meta(out4);
7067 at::AutoDispatchSkipFunctionalize func_guard;
7068 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
7069 at::_ops::miopen_rnn_out::call(input_meta, weight_meta, weight_stride0, hx_meta, cx_meta, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_meta, out0_meta, out1_meta, out2_meta, out3_meta, out4_meta);
7070 }
7071
7072 at::Tensor input_;
7073 if (at::functionalization::impl::isFunctionalTensor(input)) {
7074 at::functionalization::impl::sync(input);
7075 input_ = at::functionalization::impl::from_functional_tensor(input);
7076 } else {
7077 input_ = input;
7078 }
7079
7080 ::std::vector<at::Tensor> weight_;
7081 if (at::functionalization::impl::isFunctionalTensor(weight)) {
7082 at::functionalization::impl::sync(weight);
7083 weight_ = at::functionalization::impl::from_functional_tensor(weight);
7084 } else {
7085 weight_ = weight.vec();
7086 }
7087
7088 at::Tensor hx_;
7089 if (at::functionalization::impl::isFunctionalTensor(hx)) {
7090 at::functionalization::impl::sync(hx);
7091 hx_ = at::functionalization::impl::from_functional_tensor(hx);
7092 } else {
7093 hx_ = hx;
7094 }
7095
7096 c10::optional<at::Tensor> cx_;
7097 if (at::functionalization::impl::isFunctionalTensor(cx)) {
7098 at::functionalization::impl::sync(cx);
7099 cx_ = at::functionalization::impl::from_functional_tensor(cx);
7100 } else {
7101 cx_ = cx;
7102 }
7103
7104 c10::optional<at::Tensor> dropout_state_;
7105 if (at::functionalization::impl::isFunctionalTensor(dropout_state)) {
7106 at::functionalization::impl::sync(dropout_state);
7107 dropout_state_ = at::functionalization::impl::from_functional_tensor(dropout_state);
7108 } else {
7109 dropout_state_ = dropout_state;
7110 }
7111
7112 at::Tensor out0_;
7113 if (at::functionalization::impl::isFunctionalTensor(out0)) {
7114 at::functionalization::impl::sync(out0);
7115 out0_ = at::functionalization::impl::from_functional_tensor(out0);
7116 } else {
7117 out0_ = out0;
7118 }
7119
7120 at::Tensor out1_;
7121 if (at::functionalization::impl::isFunctionalTensor(out1)) {
7122 at::functionalization::impl::sync(out1);
7123 out1_ = at::functionalization::impl::from_functional_tensor(out1);
7124 } else {
7125 out1_ = out1;
7126 }
7127
7128 at::Tensor out2_;
7129 if (at::functionalization::impl::isFunctionalTensor(out2)) {
7130 at::functionalization::impl::sync(out2);
7131 out2_ = at::functionalization::impl::from_functional_tensor(out2);
7132 } else {
7133 out2_ = out2;
7134 }
7135
7136 at::Tensor out3_;
7137 if (at::functionalization::impl::isFunctionalTensor(out3)) {
7138 at::functionalization::impl::sync(out3);
7139 out3_ = at::functionalization::impl::from_functional_tensor(out3);
7140 } else {
7141 out3_ = out3;
7142 }
7143
7144 at::Tensor out4_;
7145 if (at::functionalization::impl::isFunctionalTensor(out4)) {
7146 at::functionalization::impl::sync(out4);
7147 out4_ = at::functionalization::impl::from_functional_tensor(out4);
7148 } else {
7149 out4_ = out4;
7150 }
7151 if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2) && at::functionalization::impl::isFunctionalTensor(out3) && at::functionalization::impl::isFunctionalTensor(out4))) {
7152 if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(hx) || at::functionalization::impl::isFunctionalTensor(cx) || at::functionalization::impl::isFunctionalTensor(dropout_state))) {
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
7154 TORCH_INTERNAL_ASSERT(false,
7155 "mutating a non-functional tensor with a functional tensor is not allowed.",
7156 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
7157 } else {
7158 // case 2: arguments are not functional tensors, so we no-op and redispatch.
7159 at::AutoDispatchSkipFunctionalize guard;
7160 ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::miopen_rnn_out::call(input_, weight_, weight_stride0, hx_, cx_, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_, out0_, out1_, out2_, out3_, out4_);
return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3, out4);
7162 }
7163 } else {
7164 ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output;
7165 {
7166 at::AutoDispatchSkipFunctionalize guard;
7167 tmp_output = at::_ops::miopen_rnn::call(input_, weight_, weight_stride0, hx_, cx_, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_);
7168 }
7169 at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
7170 at::functionalization::impl::commit_update(out0);
7171 at::functionalization::impl::sync(out0);
7172 at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
7173 at::functionalization::impl::commit_update(out1);
7174 at::functionalization::impl::sync(out1);
7175 at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
7176 at::functionalization::impl::commit_update(out2);
7177 at::functionalization::impl::sync(out2);
7178 at::functionalization::impl::replace_(out3, std::get<3>(tmp_output));
7179 at::functionalization::impl::commit_update(out3);
7180 at::functionalization::impl::sync(out3);
7181 at::functionalization::impl::replace_(out4, std::get<4>(tmp_output));
7182 at::functionalization::impl::commit_update(out4);
7183 at::functionalization::impl::sync(out4);
7184 return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3, out4);
7185 }
7186 }
7187
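// Editorial note (illustrative sketch, not produced by torchgen): miopen_rnn.out shows how
// non-plain-Tensor arguments are unwrapped by type above: a TensorList (`weight`) is materialized
// as a std::vector<at::Tensor> (via weight.vec() when not functional), and optional tensors
// (`cx`, `dropout_state`) stay c10::optional<at::Tensor>. The case-1 check also walks these
// arguments, so a functional weight list mixed with non-functional outputs is rejected.
// Sketch of the owning-copy fallback:
//
//   std::vector<at::Tensor> storage = {at::rand({4, 4}), at::rand({4, 4})};
//   at::TensorList weight = storage;                 // non-owning view
//   std::vector<at::Tensor> weight_ = weight.vec();  // owning copy used for redispatch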
7188 at::Tensor & mv_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec, at::Tensor & out) {
7189 if (false) {
7190 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
7191 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
7193 auto self_meta = to_meta(self);
7194 auto vec_meta = to_meta(vec);
7195 auto out_meta = to_meta(out);
7196 at::AutoDispatchSkipFunctionalize func_guard;
7197 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
7198 at::_ops::mv_out::call(self_meta, vec_meta, out_meta);
7199 }
7200
7201 at::Tensor self_;
7202 if (at::functionalization::impl::isFunctionalTensor(self)) {
7203 at::functionalization::impl::sync(self);
7204 self_ = at::functionalization::impl::from_functional_tensor(self);
7205 } else {
7206 self_ = self;
7207 }
7208
7209 at::Tensor vec_;
7210 if (at::functionalization::impl::isFunctionalTensor(vec)) {
7211 at::functionalization::impl::sync(vec);
7212 vec_ = at::functionalization::impl::from_functional_tensor(vec);
7213 } else {
7214 vec_ = vec;
7215 }
7216
7217 at::Tensor out_;
7218 if (at::functionalization::impl::isFunctionalTensor(out)) {
7219 at::functionalization::impl::sync(out);
7220 out_ = at::functionalization::impl::from_functional_tensor(out);
7221 } else {
7222 out_ = out;
7223 }
7224 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(vec))) {
7226 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
7227 TORCH_INTERNAL_ASSERT(false,
7228 "mutating a non-functional tensor with a functional tensor is not allowed.",
7229 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
7230 } else {
7231 // case 2: arguments are not functional tensors, so we no-op and redispatch.
7232 at::AutoDispatchSkipFunctionalize guard;
7233 at::Tensor tmp_output = at::_ops::mv_out::call(self_, vec_, out_);
return out;
7235 }
7236 } else {
7237 at::Tensor tmp_output;
7238 {
7239 at::AutoDispatchSkipFunctionalize guard;
7240 tmp_output = at::_ops::mv::call(self_, vec_);
7241 }
7242 at::functionalization::impl::replace_(out, tmp_output);
7243 at::functionalization::impl::commit_update(out);
7244 at::functionalization::impl::sync(out);
7245 return out;
7246 }
7247 }
7248
7249 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
7250 if (false) {
7251 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
7252 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
7254 auto input_meta = to_meta(input);
7255 auto weight_meta = to_meta(weight);
7256 auto bias_meta = to_meta(bias);
7257 auto running_mean_meta = to_meta(running_mean);
7258 auto running_var_meta = to_meta(running_var);
7259 auto out_meta = to_meta(out);
7260 auto save_mean_meta = to_meta(save_mean);
7261 auto save_invstd_meta = to_meta(save_invstd);
7262 at::AutoDispatchSkipFunctionalize func_guard;
7263 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
7264 at::_ops::_native_batch_norm_legit_out::call(input_meta, weight_meta, bias_meta, running_mean_meta, running_var_meta, training, momentum, eps, out_meta, save_mean_meta, save_invstd_meta);
7265 }
7266
7267 at::Tensor input_;
7268 if (at::functionalization::impl::isFunctionalTensor(input)) {
7269 at::functionalization::impl::sync(input);
7270 input_ = at::functionalization::impl::from_functional_tensor(input);
7271 } else {
7272 input_ = input;
7273 }
7274
7275 c10::optional<at::Tensor> weight_;
7276 if (at::functionalization::impl::isFunctionalTensor(weight)) {
7277 at::functionalization::impl::sync(weight);
7278 weight_ = at::functionalization::impl::from_functional_tensor(weight);
7279 } else {
7280 weight_ = weight;
7281 }
7282
7283 c10::optional<at::Tensor> bias_;
7284 if (at::functionalization::impl::isFunctionalTensor(bias)) {
7285 at::functionalization::impl::sync(bias);
7286 bias_ = at::functionalization::impl::from_functional_tensor(bias);
7287 } else {
7288 bias_ = bias;
7289 }
7290
7291 at::Tensor running_mean_;
7292 if (at::functionalization::impl::isFunctionalTensor(running_mean)) {
7293 at::functionalization::impl::sync(running_mean);
7294 running_mean_ = at::functionalization::impl::from_functional_tensor(running_mean);
7295 } else {
7296 running_mean_ = running_mean;
7297 }
7298
7299 at::Tensor running_var_;
7300 if (at::functionalization::impl::isFunctionalTensor(running_var)) {
7301 at::functionalization::impl::sync(running_var);
7302 running_var_ = at::functionalization::impl::from_functional_tensor(running_var);
7303 } else {
7304 running_var_ = running_var;
7305 }
7306
7307 at::Tensor out_;
7308 if (at::functionalization::impl::isFunctionalTensor(out)) {
7309 at::functionalization::impl::sync(out);
7310 out_ = at::functionalization::impl::from_functional_tensor(out);
7311 } else {
7312 out_ = out;
7313 }
7314
7315 at::Tensor save_mean_;
7316 if (at::functionalization::impl::isFunctionalTensor(save_mean)) {
7317 at::functionalization::impl::sync(save_mean);
7318 save_mean_ = at::functionalization::impl::from_functional_tensor(save_mean);
7319 } else {
7320 save_mean_ = save_mean;
7321 }
7322
7323 at::Tensor save_invstd_;
7324 if (at::functionalization::impl::isFunctionalTensor(save_invstd)) {
7325 at::functionalization::impl::sync(save_invstd);
7326 save_invstd_ = at::functionalization::impl::from_functional_tensor(save_invstd);
7327 } else {
7328 save_invstd_ = save_invstd;
7329 }
7330 if (!(true && at::functionalization::impl::isFunctionalTensor(running_mean) && at::functionalization::impl::isFunctionalTensor(running_var) && at::functionalization::impl::isFunctionalTensor(out) && at::functionalization::impl::isFunctionalTensor(save_mean) && at::functionalization::impl::isFunctionalTensor(save_invstd))) {
7331 if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
7333 TORCH_INTERNAL_ASSERT(false,
7334 "mutating a non-functional tensor with a functional tensor is not allowed.",
7335 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
7336 } else {
7337 // case 2: arguments are not functional tensors, so we no-op and redispatch.
7338 at::AutoDispatchSkipFunctionalize guard;
7339 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_native_batch_norm_legit_out::call(input_, weight_, bias_, running_mean_, running_var_, training, momentum, eps, out_, save_mean_, save_invstd_);
return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out, save_mean, save_invstd);
7341 }
7342 } else {
7343 ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output;
7344 {
7345 at::AutoDispatchSkipFunctionalize guard;
7346 tmp_output = at::_ops::_native_batch_norm_legit_functional::call(input_, weight_, bias_, running_mean_, running_var_, training, momentum, eps);
7347 }
7348 at::functionalization::impl::replace_(running_mean, std::get<0>(tmp_output));
7349 at::functionalization::impl::commit_update(running_mean);
7350 at::functionalization::impl::sync(running_mean);
7351 at::functionalization::impl::replace_(running_var, std::get<1>(tmp_output));
7352 at::functionalization::impl::commit_update(running_var);
7353 at::functionalization::impl::sync(running_var);
7354 at::functionalization::impl::replace_(out, std::get<2>(tmp_output));
7355 at::functionalization::impl::commit_update(out);
7356 at::functionalization::impl::sync(out);
7357 at::functionalization::impl::replace_(save_mean, std::get<3>(tmp_output));
7358 at::functionalization::impl::commit_update(save_mean);
7359 at::functionalization::impl::sync(save_mean);
7360 at::functionalization::impl::replace_(save_invstd, std::get<4>(tmp_output));
7361 at::functionalization::impl::commit_update(save_invstd);
7362 at::functionalization::impl::sync(save_invstd);
7363 return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out, save_mean, save_invstd);
7364 }
7365 }
7366
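// Note (hand-written annotation, not produced by torchgen): the kernel above is the
// functionalization boundary for the out= overload of _native_batch_norm_legit. When the
// mutated arguments (running_mean, running_var, out, save_mean, save_invstd) are all
// functional tensors, the call is rerouted to _native_batch_norm_legit_functional, and its
// five outputs are written back positionally: get<0> into running_mean, get<1> into
// running_var, get<2> into out, get<3> into save_mean, get<4> into save_invstd, each via
// replace_ / commit_update / sync so that existing views of those tensors observe the update.
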
7367 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps) {
7368 if (false) {
7369 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
7370     // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
7371     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
7372 auto input_meta = to_meta(input);
7373 auto weight_meta = to_meta(weight);
7374 auto bias_meta = to_meta(bias);
7375 auto running_mean_meta = to_meta(running_mean);
7376 auto running_var_meta = to_meta(running_var);
7377 at::AutoDispatchSkipFunctionalize func_guard;
7378 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
7379 at::_ops::_native_batch_norm_legit::call(input_meta, weight_meta, bias_meta, running_mean_meta, running_var_meta, training, momentum, eps);
7380 }
7381
7382 at::Tensor input_;
7383 if (at::functionalization::impl::isFunctionalTensor(input)) {
7384 at::functionalization::impl::sync(input);
7385 input_ = at::functionalization::impl::from_functional_tensor(input);
7386 } else {
7387 input_ = input;
7388 }
7389
7390 c10::optional<at::Tensor> weight_;
7391 if (at::functionalization::impl::isFunctionalTensor(weight)) {
7392 at::functionalization::impl::sync(weight);
7393 weight_ = at::functionalization::impl::from_functional_tensor(weight);
7394 } else {
7395 weight_ = weight;
7396 }
7397
7398 c10::optional<at::Tensor> bias_;
7399 if (at::functionalization::impl::isFunctionalTensor(bias)) {
7400 at::functionalization::impl::sync(bias);
7401 bias_ = at::functionalization::impl::from_functional_tensor(bias);
7402 } else {
7403 bias_ = bias;
7404 }
7405
7406 at::Tensor running_mean_;
7407 if (at::functionalization::impl::isFunctionalTensor(running_mean)) {
7408 at::functionalization::impl::sync(running_mean);
7409 running_mean_ = at::functionalization::impl::from_functional_tensor(running_mean);
7410 } else {
7411 running_mean_ = running_mean;
7412 }
7413
7414 at::Tensor running_var_;
7415 if (at::functionalization::impl::isFunctionalTensor(running_var)) {
7416 at::functionalization::impl::sync(running_var);
7417 running_var_ = at::functionalization::impl::from_functional_tensor(running_var);
7418 } else {
7419 running_var_ = running_var;
7420 }
7421 if (!(true && at::functionalization::impl::isFunctionalTensor(running_mean) && at::functionalization::impl::isFunctionalTensor(running_var))) {
7422 if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
7423       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
7424 TORCH_INTERNAL_ASSERT(false,
7425 "mutating a non-functional tensor with a functional tensor is not allowed.",
7426 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
7427 } else {
7428 // case 2: arguments are not functional tensors, so we no-op and redispatch.
7429 at::AutoDispatchSkipFunctionalize guard;
7430 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_native_batch_norm_legit::call(input_, weight_, bias_, running_mean_, running_var_, training, momentum, eps);
7431       return ::std::tuple<at::Tensor,at::Tensor,at::Tensor>(std::get<0>(tmp_output), std::get<1>(tmp_output), std::get<2>(tmp_output));
7432 }
7433 } else {
7434 ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output;
7435 {
7436 at::AutoDispatchSkipFunctionalize guard;
7437 tmp_output = at::_ops::_native_batch_norm_legit_functional::call(input_, weight_, bias_, running_mean_, running_var_, training, momentum, eps);
7438 }
7439 auto output_0 = at::functionalization::impl::to_functional_tensor(std::get<0>(tmp_output));
7440 auto output_1 = at::functionalization::impl::to_functional_tensor(std::get<1>(tmp_output));
7441 auto output_2 = at::functionalization::impl::to_functional_tensor(std::get<2>(tmp_output));
7442 at::functionalization::impl::replace_(running_mean, std::get<3>(tmp_output));
7443 at::functionalization::impl::commit_update(running_mean);
7444 at::functionalization::impl::sync(running_mean);
7445 at::functionalization::impl::replace_(running_var, std::get<4>(tmp_output));
7446 at::functionalization::impl::commit_update(running_var);
7447 at::functionalization::impl::sync(running_var);
7448 return ::std::tuple<at::Tensor,at::Tensor,at::Tensor>(output_0, output_1, output_2);
7449 }
7450 }
7451
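// Note (hand-written annotation, not produced by torchgen): unlike the out= overload further
// up, _native_batch_norm_legit only mutates running_mean and running_var. Under
// functionalization the call is redirected to _native_batch_norm_legit_functional; outputs
// 0-2 are fresh tensors and are simply re-wrapped with to_functional_tensor, while outputs
// 3-4 carry the updated running statistics and are committed back into the caller's
// running_mean / running_var. The shared "case 1 / case 2" branch handles the two remaining
// situations: mixing functional inputs with non-functional mutable arguments is an error,
// and a call with no functional tensors at all is simply redispatched unchanged.
//
// A rough sketch of the net effect for the caller (assuming the functionalize() transform is
// applied around the call; variable names here are invented for this note):
//   // mutable form, as written by the user:
//   //   _native_batch_norm_legit(x, w, b, running_mean, running_var, /*training=*/true, 0.1, 1e-5);
//   // what the functionalized program effectively records:
//   //   (y, save_mean, save_invstd, new_mean, new_var) =
//   //       _native_batch_norm_legit_functional(x, w, b, running_mean, running_var, true, 0.1, 1e-5);
//   //   running_mean <- new_mean; running_var <- new_var;   (applied via replace_/commit_update)
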
7452 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_out_no_stats_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
7453 if (false) {
7454 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
7455     // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
7456     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
7457 auto input_meta = to_meta(input);
7458 auto weight_meta = to_meta(weight);
7459 auto bias_meta = to_meta(bias);
7460 auto out_meta = to_meta(out);
7461 auto save_mean_meta = to_meta(save_mean);
7462 auto save_invstd_meta = to_meta(save_invstd);
7463 at::AutoDispatchSkipFunctionalize func_guard;
7464 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
7465 at::_ops::_native_batch_norm_legit_no_stats_out::call(input_meta, weight_meta, bias_meta, training, momentum, eps, out_meta, save_mean_meta, save_invstd_meta);
7466 }
7467
7468 at::Tensor input_;
7469 if (at::functionalization::impl::isFunctionalTensor(input)) {
7470 at::functionalization::impl::sync(input);
7471 input_ = at::functionalization::impl::from_functional_tensor(input);
7472 } else {
7473 input_ = input;
7474 }
7475
7476 c10::optional<at::Tensor> weight_;
7477 if (at::functionalization::impl::isFunctionalTensor(weight)) {
7478 at::functionalization::impl::sync(weight);
7479 weight_ = at::functionalization::impl::from_functional_tensor(weight);
7480 } else {
7481 weight_ = weight;
7482 }
7483
7484 c10::optional<at::Tensor> bias_;
7485 if (at::functionalization::impl::isFunctionalTensor(bias)) {
7486 at::functionalization::impl::sync(bias);
7487 bias_ = at::functionalization::impl::from_functional_tensor(bias);
7488 } else {
7489 bias_ = bias;
7490 }
7491
7492 at::Tensor out_;
7493 if (at::functionalization::impl::isFunctionalTensor(out)) {
7494 at::functionalization::impl::sync(out);
7495 out_ = at::functionalization::impl::from_functional_tensor(out);
7496 } else {
7497 out_ = out;
7498 }
7499
7500 at::Tensor save_mean_;
7501 if (at::functionalization::impl::isFunctionalTensor(save_mean)) {
7502 at::functionalization::impl::sync(save_mean);
7503 save_mean_ = at::functionalization::impl::from_functional_tensor(save_mean);
7504 } else {
7505 save_mean_ = save_mean;
7506 }
7507
7508 at::Tensor save_invstd_;
7509 if (at::functionalization::impl::isFunctionalTensor(save_invstd)) {
7510 at::functionalization::impl::sync(save_invstd);
7511 save_invstd_ = at::functionalization::impl::from_functional_tensor(save_invstd);
7512 } else {
7513 save_invstd_ = save_invstd;
7514 }
7515 if (!(true && at::functionalization::impl::isFunctionalTensor(out) && at::functionalization::impl::isFunctionalTensor(save_mean) && at::functionalization::impl::isFunctionalTensor(save_invstd))) {
7516 if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
7517       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
7518 TORCH_INTERNAL_ASSERT(false,
7519 "mutating a non-functional tensor with a functional tensor is not allowed.",
7520 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
7521 } else {
7522 // case 2: arguments are not functional tensors, so we no-op and redispatch.
7523 at::AutoDispatchSkipFunctionalize guard;
7524 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_native_batch_norm_legit_no_stats_out::call(input_, weight_, bias_, training, momentum, eps, out_, save_mean_, save_invstd_);
7525       return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out, save_mean, save_invstd);
7526 }
7527 } else {
7528 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
7529 {
7530 at::AutoDispatchSkipFunctionalize guard;
7531 tmp_output = at::_ops::_native_batch_norm_legit_no_stats::call(input_, weight_, bias_, training, momentum, eps);
7532 }
7533 at::functionalization::impl::replace_(out, std::get<0>(tmp_output));
7534 at::functionalization::impl::commit_update(out);
7535 at::functionalization::impl::sync(out);
7536 at::functionalization::impl::replace_(save_mean, std::get<1>(tmp_output));
7537 at::functionalization::impl::commit_update(save_mean);
7538 at::functionalization::impl::sync(save_mean);
7539 at::functionalization::impl::replace_(save_invstd, std::get<2>(tmp_output));
7540 at::functionalization::impl::commit_update(save_invstd);
7541 at::functionalization::impl::sync(save_invstd);
7542 return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out, save_mean, save_invstd);
7543 }
7544 }
7545
7546 ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_stats_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double eps, at::Tensor & out0, at::Tensor & out1) {
7547 if (false) {
7548 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
7549     // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
7550     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
7551 auto input_meta = to_meta(input);
7552 auto out0_meta = to_meta(out0);
7553 auto out1_meta = to_meta(out1);
7554 at::AutoDispatchSkipFunctionalize func_guard;
7555 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
7556 at::_ops::batch_norm_stats_out::call(input_meta, eps, out0_meta, out1_meta);
7557 }
7558
7559 at::Tensor input_;
7560 if (at::functionalization::impl::isFunctionalTensor(input)) {
7561 at::functionalization::impl::sync(input);
7562 input_ = at::functionalization::impl::from_functional_tensor(input);
7563 } else {
7564 input_ = input;
7565 }
7566
7567 at::Tensor out0_;
7568 if (at::functionalization::impl::isFunctionalTensor(out0)) {
7569 at::functionalization::impl::sync(out0);
7570 out0_ = at::functionalization::impl::from_functional_tensor(out0);
7571 } else {
7572 out0_ = out0;
7573 }
7574
7575 at::Tensor out1_;
7576 if (at::functionalization::impl::isFunctionalTensor(out1)) {
7577 at::functionalization::impl::sync(out1);
7578 out1_ = at::functionalization::impl::from_functional_tensor(out1);
7579 } else {
7580 out1_ = out1;
7581 }
7582 if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
7583 if ((false || at::functionalization::impl::isFunctionalTensor(input))) {
7584       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
7585 TORCH_INTERNAL_ASSERT(false,
7586 "mutating a non-functional tensor with a functional tensor is not allowed.",
7587 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
7588 } else {
7589 // case 2: arguments are not functional tensors, so we no-op and redispatch.
7590 at::AutoDispatchSkipFunctionalize guard;
7591 ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::batch_norm_stats_out::call(input_, eps, out0_, out1_);
7592       return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
7593 }
7594 } else {
7595 ::std::tuple<at::Tensor,at::Tensor> tmp_output;
7596 {
7597 at::AutoDispatchSkipFunctionalize guard;
7598 tmp_output = at::_ops::batch_norm_stats::call(input_, eps);
7599 }
7600 at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
7601 at::functionalization::impl::commit_update(out0);
7602 at::functionalization::impl::sync(out0);
7603 at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
7604 at::functionalization::impl::commit_update(out1);
7605 at::functionalization::impl::sync(out1);
7606 return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
7607 }
7608 }
7609
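// Note (hand-written annotation, not produced by torchgen): batch_norm_stats_out is a plain
// two-output out= op with no other mutable arguments, so it is the simplest multi-output case
// in this stretch of the file: compute batch_norm_stats functionally, then commit
// std::get<0>(tmp_output) into out0 and std::get<1>(tmp_output) into out1.
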
7610 at::Tensor & batch_norm_backward_elemt_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, const at::Tensor & mean_dy, const at::Tensor & mean_dy_xmu, const at::Tensor & count, at::Tensor & out) {
7611 if (false) {
7612 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
7613     // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
7614     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
7615 auto grad_out_meta = to_meta(grad_out);
7616 auto input_meta = to_meta(input);
7617 auto mean_meta = to_meta(mean);
7618 auto invstd_meta = to_meta(invstd);
7619 auto weight_meta = to_meta(weight);
7620 auto mean_dy_meta = to_meta(mean_dy);
7621 auto mean_dy_xmu_meta = to_meta(mean_dy_xmu);
7622 auto count_meta = to_meta(count);
7623 auto out_meta = to_meta(out);
7624 at::AutoDispatchSkipFunctionalize func_guard;
7625 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
7626 at::_ops::batch_norm_backward_elemt_out::call(grad_out_meta, input_meta, mean_meta, invstd_meta, weight_meta, mean_dy_meta, mean_dy_xmu_meta, count_meta, out_meta);
7627 }
7628
7629 at::Tensor grad_out_;
7630 if (at::functionalization::impl::isFunctionalTensor(grad_out)) {
7631 at::functionalization::impl::sync(grad_out);
7632 grad_out_ = at::functionalization::impl::from_functional_tensor(grad_out);
7633 } else {
7634 grad_out_ = grad_out;
7635 }
7636
7637 at::Tensor input_;
7638 if (at::functionalization::impl::isFunctionalTensor(input)) {
7639 at::functionalization::impl::sync(input);
7640 input_ = at::functionalization::impl::from_functional_tensor(input);
7641 } else {
7642 input_ = input;
7643 }
7644
7645 at::Tensor mean_;
7646 if (at::functionalization::impl::isFunctionalTensor(mean)) {
7647 at::functionalization::impl::sync(mean);
7648 mean_ = at::functionalization::impl::from_functional_tensor(mean);
7649 } else {
7650 mean_ = mean;
7651 }
7652
7653 at::Tensor invstd_;
7654 if (at::functionalization::impl::isFunctionalTensor(invstd)) {
7655 at::functionalization::impl::sync(invstd);
7656 invstd_ = at::functionalization::impl::from_functional_tensor(invstd);
7657 } else {
7658 invstd_ = invstd;
7659 }
7660
7661 c10::optional<at::Tensor> weight_;
7662 if (at::functionalization::impl::isFunctionalTensor(weight)) {
7663 at::functionalization::impl::sync(weight);
7664 weight_ = at::functionalization::impl::from_functional_tensor(weight);
7665 } else {
7666 weight_ = weight;
7667 }
7668
7669 at::Tensor mean_dy_;
7670 if (at::functionalization::impl::isFunctionalTensor(mean_dy)) {
7671 at::functionalization::impl::sync(mean_dy);
7672 mean_dy_ = at::functionalization::impl::from_functional_tensor(mean_dy);
7673 } else {
7674 mean_dy_ = mean_dy;
7675 }
7676
7677 at::Tensor mean_dy_xmu_;
7678 if (at::functionalization::impl::isFunctionalTensor(mean_dy_xmu)) {
7679 at::functionalization::impl::sync(mean_dy_xmu);
7680 mean_dy_xmu_ = at::functionalization::impl::from_functional_tensor(mean_dy_xmu);
7681 } else {
7682 mean_dy_xmu_ = mean_dy_xmu;
7683 }
7684
7685 at::Tensor count_;
7686 if (at::functionalization::impl::isFunctionalTensor(count)) {
7687 at::functionalization::impl::sync(count);
7688 count_ = at::functionalization::impl::from_functional_tensor(count);
7689 } else {
7690 count_ = count;
7691 }
7692
7693 at::Tensor out_;
7694 if (at::functionalization::impl::isFunctionalTensor(out)) {
7695 at::functionalization::impl::sync(out);
7696 out_ = at::functionalization::impl::from_functional_tensor(out);
7697 } else {
7698 out_ = out;
7699 }
7700 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
7701 if ((false || at::functionalization::impl::isFunctionalTensor(grad_out) || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(mean) || at::functionalization::impl::isFunctionalTensor(invstd) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(mean_dy) || at::functionalization::impl::isFunctionalTensor(mean_dy_xmu) || at::functionalization::impl::isFunctionalTensor(count))) {
7702       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
7703 TORCH_INTERNAL_ASSERT(false,
7704 "mutating a non-functional tensor with a functional tensor is not allowed.",
7705 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
7706 } else {
7707 // case 2: arguments are not functional tensors, so we no-op and redispatch.
7708 at::AutoDispatchSkipFunctionalize guard;
7709 at::Tensor tmp_output = at::_ops::batch_norm_backward_elemt_out::call(grad_out_, input_, mean_, invstd_, weight_, mean_dy_, mean_dy_xmu_, count_, out_);
7710       return out;
7711 }
7712 } else {
7713 at::Tensor tmp_output;
7714 {
7715 at::AutoDispatchSkipFunctionalize guard;
7716 tmp_output = at::_ops::batch_norm_backward_elemt::call(grad_out_, input_, mean_, invstd_, weight_, mean_dy_, mean_dy_xmu_, count_);
7717 }
7718 at::functionalization::impl::replace_(out, tmp_output);
7719 at::functionalization::impl::commit_update(out);
7720 at::functionalization::impl::sync(out);
7721 return out;
7722 }
7723 }
7724
7725 at::Tensor & _euclidean_dist_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2, at::Tensor & out) {
7726 if (false) {
7727 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
7728     // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
7729     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
7730 auto x1_meta = to_meta(x1);
7731 auto x2_meta = to_meta(x2);
7732 auto out_meta = to_meta(out);
7733 at::AutoDispatchSkipFunctionalize func_guard;
7734 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
7735 at::_ops::_euclidean_dist_out::call(x1_meta, x2_meta, out_meta);
7736 }
7737
7738 at::Tensor x1_;
7739 if (at::functionalization::impl::isFunctionalTensor(x1)) {
7740 at::functionalization::impl::sync(x1);
7741 x1_ = at::functionalization::impl::from_functional_tensor(x1);
7742 } else {
7743 x1_ = x1;
7744 }
7745
7746 at::Tensor x2_;
7747 if (at::functionalization::impl::isFunctionalTensor(x2)) {
7748 at::functionalization::impl::sync(x2);
7749 x2_ = at::functionalization::impl::from_functional_tensor(x2);
7750 } else {
7751 x2_ = x2;
7752 }
7753
7754 at::Tensor out_;
7755 if (at::functionalization::impl::isFunctionalTensor(out)) {
7756 at::functionalization::impl::sync(out);
7757 out_ = at::functionalization::impl::from_functional_tensor(out);
7758 } else {
7759 out_ = out;
7760 }
7761 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
7762 if ((false || at::functionalization::impl::isFunctionalTensor(x1) || at::functionalization::impl::isFunctionalTensor(x2))) {
7763       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
7764 TORCH_INTERNAL_ASSERT(false,
7765 "mutating a non-functional tensor with a functional tensor is not allowed.",
7766 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
7767 } else {
7768 // case 2: arguments are not functional tensors, so we no-op and redispatch.
7769 at::AutoDispatchSkipFunctionalize guard;
7770 at::Tensor tmp_output = at::_ops::_euclidean_dist_out::call(x1_, x2_, out_);
7771       return out;
7772 }
7773 } else {
7774 at::Tensor tmp_output;
7775 {
7776 at::AutoDispatchSkipFunctionalize guard;
7777 tmp_output = at::_ops::_euclidean_dist::call(x1_, x2_);
7778 }
7779 at::functionalization::impl::replace_(out, tmp_output);
7780 at::functionalization::impl::commit_update(out);
7781 at::functionalization::impl::sync(out);
7782 return out;
7783 }
7784 }
7785
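// The single-output out= kernels in this file (_euclidean_dist_out_out above,
// _cdist_forward_out_out and pixel_unshuffle_out_out below, and so on) all follow the same
// three-step recipe. The unused helper below is a hand-written, minimal sketch of that recipe
// for _euclidean_dist only; it is not part of the generated registrations, its name is
// invented for this note, and it omits the meta-tensor pre-check and the sync() of pending
// view updates that the generated kernels perform before unwrapping.
namespace {
[[maybe_unused]] at::Tensor & sketch_functionalized_euclidean_dist_out(
    const at::Tensor & x1, const at::Tensor & x2, at::Tensor & out) {
  // Step 1: unwrap FunctionalTensorWrappers so the redispatched op sees the underlying tensors.
  at::Tensor x1_ = at::functionalization::impl::isFunctionalTensor(x1)
      ? at::functionalization::impl::from_functional_tensor(x1) : x1;
  at::Tensor x2_ = at::functionalization::impl::isFunctionalTensor(x2)
      ? at::functionalization::impl::from_functional_tensor(x2) : x2;
  // Step 2: run the functional variant of the op below the Functionalize dispatch key.
  at::Tensor tmp_output;
  {
    at::AutoDispatchSkipFunctionalize guard;
    tmp_output = at::_ops::_euclidean_dist::call(x1_, x2_);
  }
  // Step 3: instead of mutating `out`, commit the freshly computed value into its functional
  // wrapper so that views of `out` see the new value on their next sync.
  at::functionalization::impl::replace_(out, tmp_output);
  at::functionalization::impl::commit_update(out);
  at::functionalization::impl::sync(out);
  return out;
}
} // anonymous namespace
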
7786 at::Tensor & _cdist_forward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode, at::Tensor & out) {
7787 if (false) {
7788 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
7789     // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
7790     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
7791 auto x1_meta = to_meta(x1);
7792 auto x2_meta = to_meta(x2);
7793 auto out_meta = to_meta(out);
7794 at::AutoDispatchSkipFunctionalize func_guard;
7795 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
7796 at::_ops::_cdist_forward_out::call(x1_meta, x2_meta, p, compute_mode, out_meta);
7797 }
7798
7799 at::Tensor x1_;
7800 if (at::functionalization::impl::isFunctionalTensor(x1)) {
7801 at::functionalization::impl::sync(x1);
7802 x1_ = at::functionalization::impl::from_functional_tensor(x1);
7803 } else {
7804 x1_ = x1;
7805 }
7806
7807 at::Tensor x2_;
7808 if (at::functionalization::impl::isFunctionalTensor(x2)) {
7809 at::functionalization::impl::sync(x2);
7810 x2_ = at::functionalization::impl::from_functional_tensor(x2);
7811 } else {
7812 x2_ = x2;
7813 }
7814
7815 at::Tensor out_;
7816 if (at::functionalization::impl::isFunctionalTensor(out)) {
7817 at::functionalization::impl::sync(out);
7818 out_ = at::functionalization::impl::from_functional_tensor(out);
7819 } else {
7820 out_ = out;
7821 }
7822 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
7823 if ((false || at::functionalization::impl::isFunctionalTensor(x1) || at::functionalization::impl::isFunctionalTensor(x2))) {
7824       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
7825 TORCH_INTERNAL_ASSERT(false,
7826 "mutating a non-functional tensor with a functional tensor is not allowed.",
7827 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
7828 } else {
7829 // case 2: arguments are not functional tensors, so we no-op and redispatch.
7830 at::AutoDispatchSkipFunctionalize guard;
7831 at::Tensor tmp_output = at::_ops::_cdist_forward_out::call(x1_, x2_, p, compute_mode, out_);
7832       return out;
7833 }
7834 } else {
7835 at::Tensor tmp_output;
7836 {
7837 at::AutoDispatchSkipFunctionalize guard;
7838 tmp_output = at::_ops::_cdist_forward::call(x1_, x2_, p, compute_mode);
7839 }
7840 at::functionalization::impl::replace_(out, tmp_output);
7841 at::functionalization::impl::commit_update(out);
7842 at::functionalization::impl::sync(out);
7843 return out;
7844 }
7845 }
7846
7847 at::Tensor & _cdist_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist, at::Tensor & out) {
7848 if (false) {
7849 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
7850     // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
7851     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
7852 auto grad_meta = to_meta(grad);
7853 auto x1_meta = to_meta(x1);
7854 auto x2_meta = to_meta(x2);
7855 auto cdist_meta = to_meta(cdist);
7856 auto out_meta = to_meta(out);
7857 at::AutoDispatchSkipFunctionalize func_guard;
7858 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
7859 at::_ops::_cdist_backward_out::call(grad_meta, x1_meta, x2_meta, p, cdist_meta, out_meta);
7860 }
7861
7862 at::Tensor grad_;
7863 if (at::functionalization::impl::isFunctionalTensor(grad)) {
7864 at::functionalization::impl::sync(grad);
7865 grad_ = at::functionalization::impl::from_functional_tensor(grad);
7866 } else {
7867 grad_ = grad;
7868 }
7869
7870 at::Tensor x1_;
7871 if (at::functionalization::impl::isFunctionalTensor(x1)) {
7872 at::functionalization::impl::sync(x1);
7873 x1_ = at::functionalization::impl::from_functional_tensor(x1);
7874 } else {
7875 x1_ = x1;
7876 }
7877
7878 at::Tensor x2_;
7879 if (at::functionalization::impl::isFunctionalTensor(x2)) {
7880 at::functionalization::impl::sync(x2);
7881 x2_ = at::functionalization::impl::from_functional_tensor(x2);
7882 } else {
7883 x2_ = x2;
7884 }
7885
7886 at::Tensor cdist_;
7887 if (at::functionalization::impl::isFunctionalTensor(cdist)) {
7888 at::functionalization::impl::sync(cdist);
7889 cdist_ = at::functionalization::impl::from_functional_tensor(cdist);
7890 } else {
7891 cdist_ = cdist;
7892 }
7893
7894 at::Tensor out_;
7895 if (at::functionalization::impl::isFunctionalTensor(out)) {
7896 at::functionalization::impl::sync(out);
7897 out_ = at::functionalization::impl::from_functional_tensor(out);
7898 } else {
7899 out_ = out;
7900 }
7901 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
7902 if ((false || at::functionalization::impl::isFunctionalTensor(grad) || at::functionalization::impl::isFunctionalTensor(x1) || at::functionalization::impl::isFunctionalTensor(x2) || at::functionalization::impl::isFunctionalTensor(cdist))) {
7903       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
7904 TORCH_INTERNAL_ASSERT(false,
7905 "mutating a non-functional tensor with a functional tensor is not allowed.",
7906 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
7907 } else {
7908 // case 2: arguments are not functional tensors, so we no-op and redispatch.
7909 at::AutoDispatchSkipFunctionalize guard;
7910 at::Tensor tmp_output = at::_ops::_cdist_backward_out::call(grad_, x1_, x2_, p, cdist_, out_);
7911       return out;
7912 }
7913 } else {
7914 at::Tensor tmp_output;
7915 {
7916 at::AutoDispatchSkipFunctionalize guard;
7917 tmp_output = at::_ops::_cdist_backward::call(grad_, x1_, x2_, p, cdist_);
7918 }
7919 at::functionalization::impl::replace_(out, tmp_output);
7920 at::functionalization::impl::commit_update(out);
7921 at::functionalization::impl::sync(out);
7922 return out;
7923 }
7924 }
7925
7926 at::Tensor & pixel_unshuffle_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t downscale_factor, at::Tensor & out) {
7927 if (false) {
7928 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
7929     // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
7930     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
7931 auto self_meta = to_meta(self);
7932 auto out_meta = to_meta(out);
7933 at::AutoDispatchSkipFunctionalize func_guard;
7934 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
7935 at::_ops::pixel_unshuffle_out::call(self_meta, downscale_factor, out_meta);
7936 }
7937
7938 at::Tensor self_;
7939 if (at::functionalization::impl::isFunctionalTensor(self)) {
7940 at::functionalization::impl::sync(self);
7941 self_ = at::functionalization::impl::from_functional_tensor(self);
7942 } else {
7943 self_ = self;
7944 }
7945
7946 at::Tensor out_;
7947 if (at::functionalization::impl::isFunctionalTensor(out)) {
7948 at::functionalization::impl::sync(out);
7949 out_ = at::functionalization::impl::from_functional_tensor(out);
7950 } else {
7951 out_ = out;
7952 }
7953 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
7954 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
7955       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
7956 TORCH_INTERNAL_ASSERT(false,
7957 "mutating a non-functional tensor with a functional tensor is not allowed.",
7958 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
7959 } else {
7960 // case 2: arguments are not functional tensors, so we no-op and redispatch.
7961 at::AutoDispatchSkipFunctionalize guard;
7962 at::Tensor tmp_output = at::_ops::pixel_unshuffle_out::call(self_, downscale_factor, out_);
7963       return out;
7964 }
7965 } else {
7966 at::Tensor tmp_output;
7967 {
7968 at::AutoDispatchSkipFunctionalize guard;
7969 tmp_output = at::_ops::pixel_unshuffle::call(self_, downscale_factor);
7970 }
7971 at::functionalization::impl::replace_(out, tmp_output);
7972 at::functionalization::impl::commit_update(out);
7973 at::functionalization::impl::sync(out);
7974 return out;
7975 }
7976 }
7977
7978 at::Tensor & rad2deg_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
7979 if (false) {
7980 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
7981     // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
7982     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
7983 auto self_meta = to_meta(self);
7984 auto out_meta = to_meta(out);
7985 at::AutoDispatchSkipFunctionalize func_guard;
7986 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
7987 at::_ops::rad2deg_out::call(self_meta, out_meta);
7988 }
7989
7990 at::Tensor self_;
7991 if (at::functionalization::impl::isFunctionalTensor(self)) {
7992 at::functionalization::impl::sync(self);
7993 self_ = at::functionalization::impl::from_functional_tensor(self);
7994 } else {
7995 self_ = self;
7996 }
7997
7998 at::Tensor out_;
7999 if (at::functionalization::impl::isFunctionalTensor(out)) {
8000 at::functionalization::impl::sync(out);
8001 out_ = at::functionalization::impl::from_functional_tensor(out);
8002 } else {
8003 out_ = out;
8004 }
8005 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
8006 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
8007       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
8008 TORCH_INTERNAL_ASSERT(false,
8009 "mutating a non-functional tensor with a functional tensor is not allowed.",
8010 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
8011 } else {
8012 // case 2: arguments are not functional tensors, so we no-op and redispatch.
8013 at::AutoDispatchSkipFunctionalize guard;
8014 at::Tensor tmp_output = at::_ops::rad2deg_out::call(self_, out_);
8015       return out;
8016 }
8017 } else {
8018 at::Tensor tmp_output;
8019 {
8020 at::AutoDispatchSkipFunctionalize guard;
8021 tmp_output = at::_ops::rad2deg::call(self_);
8022 }
8023 at::functionalization::impl::replace_(out, tmp_output);
8024 at::functionalization::impl::commit_update(out);
8025 at::functionalization::impl::sync(out);
8026 return out;
8027 }
8028 }
8029
8030 at::Tensor & rad2deg_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
8031 if (true) {
8032 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
8033     // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
8034     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
8035 auto self_meta = to_meta(self);
8036 at::AutoDispatchSkipFunctionalize func_guard;
8037 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
8038 at::_ops::rad2deg_::call(self_meta);
8039 }
8040
8041 at::Tensor self_;
8042 if (at::functionalization::impl::isFunctionalTensor(self)) {
8043 at::functionalization::impl::sync(self);
8044 self_ = at::functionalization::impl::from_functional_tensor(self);
8045 } else {
8046 self_ = self;
8047 }
8048 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
8049 if ((false)) {
8050       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
8051 TORCH_INTERNAL_ASSERT(false,
8052 "mutating a non-functional tensor with a functional tensor is not allowed.",
8053 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
8054 } else {
8055 // case 2: arguments are not functional tensors, so we no-op and redispatch.
8056 at::AutoDispatchSkipFunctionalize guard;
8057 at::Tensor tmp_output = at::_ops::rad2deg_::call(self_);
8058       return self;
8059 }
8060 } else {
8061 at::Tensor tmp_output;
8062 {
8063 at::AutoDispatchSkipFunctionalize guard;
8064 tmp_output = at::_ops::rad2deg::call(self_);
8065 }
8066 at::functionalization::impl::replace_(self, tmp_output);
8067 at::functionalization::impl::commit_update(self);
8068 at::functionalization::impl::sync(self);
8069 return self;
8070 }
8071 }
8072
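// Note (hand-written annotation, not produced by torchgen): rad2deg_ above is the first purely
// in-place kernel in this stretch of the file. For in-place ops the meta-tensor shape
// pre-check at the top actually runs (the guard is `if (true)` rather than `if (false)`),
// and the mutation of `self` is replayed as the functional op plus a write-back. Roughly
// (sketch; assumes the caller runs under the functionalize() transform, with names invented
// for this note):
//   //   x.rad2deg_();                          // what the user wrote
//   //   x_new = rad2deg(x);                    // what actually gets dispatched (pure op)
//   //   replace_(x, x_new); commit_update(x);  // x is re-pointed at the new value
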
8073 at::Tensor & scalar_tensor_out_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & s, at::Tensor & out) {
8074 if (false) {
8075 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
8076     // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
8077     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
8078 auto out_meta = to_meta(out);
8079 at::AutoDispatchSkipFunctionalize func_guard;
8080 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
8081 at::_ops::scalar_tensor_out::call(s, out_meta);
8082 }
8083
8084 at::Tensor out_;
8085 if (at::functionalization::impl::isFunctionalTensor(out)) {
8086 at::functionalization::impl::sync(out);
8087 out_ = at::functionalization::impl::from_functional_tensor(out);
8088 } else {
8089 out_ = out;
8090 }
8091 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
8092 if ((false)) {
8093       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
8094 TORCH_INTERNAL_ASSERT(false,
8095 "mutating a non-functional tensor with a functional tensor is not allowed.",
8096 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
8097 } else {
8098 // case 2: arguments are not functional tensors, so we no-op and redispatch.
8099 at::AutoDispatchSkipFunctionalize guard;
8100 at::Tensor tmp_output = at::_ops::scalar_tensor_out::call(s, out_);
8101       return out;
8102 }
8103 } else {
8104 at::Tensor tmp_output;
8105 {
8106 at::AutoDispatchSkipFunctionalize guard;
8107 tmp_output = at::_ops::scalar_tensor::call(s, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt);
8108 }
8109 at::functionalization::impl::replace_(out, tmp_output);
8110 at::functionalization::impl::commit_update(out);
8111 at::functionalization::impl::sync(out);
8112 return out;
8113 }
8114 }
8115
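// Note (hand-written annotation, not produced by torchgen): scalar_tensor_out above and the
// rand*_out overloads that follow are factory out= ops. Their functional counterparts take
// dtype / layout / device arguments instead of an `out` tensor, so these kernels reconstruct
// those options from `out_` (out_.scalar_type(), out_.layout(), out_.device()) and pass
// c10::nullopt for pin_memory when redispatching to the functional factory.
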
8116 at::Tensor & rand_out_names_out(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) {
8117 if (false) {
8118 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
8119     // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
8120     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
8121 auto out_meta = to_meta(out);
8122 at::AutoDispatchSkipFunctionalize func_guard;
8123 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
8124 at::_ops::rand_names_out::call(size, names, out_meta);
8125 }
8126
8127 at::Tensor out_;
8128 if (at::functionalization::impl::isFunctionalTensor(out)) {
8129 at::functionalization::impl::sync(out);
8130 out_ = at::functionalization::impl::from_functional_tensor(out);
8131 } else {
8132 out_ = out;
8133 }
8134 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
8135 if ((false)) {
8136       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
8137 TORCH_INTERNAL_ASSERT(false,
8138 "mutating a non-functional tensor with a functional tensor is not allowed.",
8139 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
8140 } else {
8141 // case 2: arguments are not functional tensors, so we no-op and redispatch.
8142 at::AutoDispatchSkipFunctionalize guard;
8143 at::Tensor tmp_output = at::_ops::rand_names_out::call(size, names, out_);
8144       return out;
8145 }
8146 } else {
8147 at::Tensor tmp_output;
8148 {
8149 at::AutoDispatchSkipFunctionalize guard;
8150 tmp_output = at::_ops::rand_names::call(size, names, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt);
8151 }
8152 at::functionalization::impl::replace_(out, tmp_output);
8153 at::functionalization::impl::commit_update(out);
8154 at::functionalization::impl::sync(out);
8155 return out;
8156 }
8157 }
8158
8159 at::Tensor & rand_out_generator_with_names_out(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::Tensor & out) {
8160 if (false) {
8161 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
8162     // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
8163     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
8164 auto out_meta = to_meta(out);
8165 at::AutoDispatchSkipFunctionalize func_guard;
8166 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
8167 at::_ops::rand_generator_with_names_out::call(size, generator, names, out_meta);
8168 }
8169
8170 at::Tensor out_;
8171 if (at::functionalization::impl::isFunctionalTensor(out)) {
8172 at::functionalization::impl::sync(out);
8173 out_ = at::functionalization::impl::from_functional_tensor(out);
8174 } else {
8175 out_ = out;
8176 }
8177 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
8178 if ((false)) {
8179       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
8180 TORCH_INTERNAL_ASSERT(false,
8181 "mutating a non-functional tensor with a functional tensor is not allowed.",
8182 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
8183 } else {
8184 // case 2: arguments are not functional tensors, so we no-op and redispatch.
8185 at::AutoDispatchSkipFunctionalize guard;
8186 at::Tensor tmp_output = at::_ops::rand_generator_with_names_out::call(size, generator, names, out_);
8187       return out;
8188 }
8189 } else {
8190 at::Tensor tmp_output;
8191 {
8192 at::AutoDispatchSkipFunctionalize guard;
8193 tmp_output = at::_ops::rand_generator_with_names::call(size, generator, names, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt);
8194 }
8195 at::functionalization::impl::replace_(out, tmp_output);
8196 at::functionalization::impl::commit_update(out);
8197 at::functionalization::impl::sync(out);
8198 return out;
8199 }
8200 }
8201
8202 at::Tensor & rand_out_out(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::Tensor & out) {
8203 if (false) {
8204 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
8205     // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
8206     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
8207 auto out_meta = to_meta(out);
8208 at::AutoDispatchSkipFunctionalize func_guard;
8209 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
8210 at::_ops::rand_out::call(size, out_meta);
8211 }
8212
8213 at::Tensor out_;
8214 if (at::functionalization::impl::isFunctionalTensor(out)) {
8215 at::functionalization::impl::sync(out);
8216 out_ = at::functionalization::impl::from_functional_tensor(out);
8217 } else {
8218 out_ = out;
8219 }
8220 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
8221 if ((false)) {
8222       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
8223 TORCH_INTERNAL_ASSERT(false,
8224 "mutating a non-functional tensor with a functional tensor is not allowed.",
8225 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
8226 } else {
8227 // case 2: arguments are not functional tensors, so we no-op and redispatch.
8228 at::AutoDispatchSkipFunctionalize guard;
8229 at::Tensor tmp_output = at::_ops::rand_out::call(size, out_);
8230       return out;
8231 }
8232 } else {
8233 at::Tensor tmp_output;
8234 {
8235 at::AutoDispatchSkipFunctionalize guard;
8236 tmp_output = at::_ops::rand::call(size, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt);
8237 }
8238 at::functionalization::impl::replace_(out, tmp_output);
8239 at::functionalization::impl::commit_update(out);
8240 at::functionalization::impl::sync(out);
8241 return out;
8242 }
8243 }
8244
8245 at::Tensor & rand_out_generator_out(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
8246 if (false) {
8247 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
8248     // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
8249     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
8250 auto out_meta = to_meta(out);
8251 at::AutoDispatchSkipFunctionalize func_guard;
8252 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
8253 at::_ops::rand_generator_out::call(size, generator, out_meta);
8254 }
8255
8256 at::Tensor out_;
8257 if (at::functionalization::impl::isFunctionalTensor(out)) {
8258 at::functionalization::impl::sync(out);
8259 out_ = at::functionalization::impl::from_functional_tensor(out);
8260 } else {
8261 out_ = out;
8262 }
8263 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
8264 if ((false)) {
8265       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
8266 TORCH_INTERNAL_ASSERT(false,
8267 "mutating a non-functional tensor with a functional tensor is not allowed.",
8268 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
8269 } else {
8270 // case 2: arguments are not functional tensors, so we no-op and redispatch.
8271 at::AutoDispatchSkipFunctionalize guard;
8272 at::Tensor tmp_output = at::_ops::rand_generator_out::call(size, generator, out_);
8273       return out;
8274 }
8275 } else {
8276 at::Tensor tmp_output;
8277 {
8278 at::AutoDispatchSkipFunctionalize guard;
8279 tmp_output = at::_ops::rand_generator::call(size, generator, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt);
8280 }
8281 at::functionalization::impl::replace_(out, tmp_output);
8282 at::functionalization::impl::commit_update(out);
8283 at::functionalization::impl::sync(out);
8284 return out;
8285 }
8286 }
8287
8288 at::Tensor & rand_like_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
8289 if (false) {
8290 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
8291     // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
8292     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
8293 auto self_meta = to_meta(self);
8294 auto out_meta = to_meta(out);
8295 at::AutoDispatchSkipFunctionalize func_guard;
8296 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
8297 at::_ops::rand_like_out::call(self_meta, memory_format, out_meta);
8298 }
8299
8300 at::Tensor self_;
8301 if (at::functionalization::impl::isFunctionalTensor(self)) {
8302 at::functionalization::impl::sync(self);
8303 self_ = at::functionalization::impl::from_functional_tensor(self);
8304 } else {
8305 self_ = self;
8306 }
8307
8308 at::Tensor out_;
8309 if (at::functionalization::impl::isFunctionalTensor(out)) {
8310 at::functionalization::impl::sync(out);
8311 out_ = at::functionalization::impl::from_functional_tensor(out);
8312 } else {
8313 out_ = out;
8314 }
8315 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
8316 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
8317       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
8318 TORCH_INTERNAL_ASSERT(false,
8319 "mutating a non-functional tensor with a functional tensor is not allowed.",
8320 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
8321 } else {
8322 // case 2: arguments are not functional tensors, so we no-op and redispatch.
8323 at::AutoDispatchSkipFunctionalize guard;
8324 at::Tensor tmp_output = at::_ops::rand_like_out::call(self_, memory_format, out_);
8325       return out;
8326 }
8327 } else {
8328 at::Tensor tmp_output;
8329 {
8330 at::AutoDispatchSkipFunctionalize guard;
8331 tmp_output = at::_ops::rand_like::call(self_, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt, memory_format);
8332 }
8333 at::functionalization::impl::replace_(out, tmp_output);
8334 at::functionalization::impl::commit_update(out);
8335 at::functionalization::impl::sync(out);
8336 return out;
8337 }
8338 }
8339
8340 at::Tensor & relu_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
8341 if (false) {
8342 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
8343     // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
8344     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
8345 auto self_meta = to_meta(self);
8346 auto out_meta = to_meta(out);
8347 at::AutoDispatchSkipFunctionalize func_guard;
8348 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
8349 at::_ops::relu_out::call(self_meta, out_meta);
8350 }
8351
8352 at::Tensor self_;
8353 if (at::functionalization::impl::isFunctionalTensor(self)) {
8354 at::functionalization::impl::sync(self);
8355 self_ = at::functionalization::impl::from_functional_tensor(self);
8356 } else {
8357 self_ = self;
8358 }
8359
8360 at::Tensor out_;
8361 if (at::functionalization::impl::isFunctionalTensor(out)) {
8362 at::functionalization::impl::sync(out);
8363 out_ = at::functionalization::impl::from_functional_tensor(out);
8364 } else {
8365 out_ = out;
8366 }
8367 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
8368 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
8369       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
8370 TORCH_INTERNAL_ASSERT(false,
8371 "mutating a non-functional tensor with a functional tensor is not allowed.",
8372 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
8373 } else {
8374 // case 2: arguments are not functional tensors, so we no-op and redispatch.
8375 at::AutoDispatchSkipFunctionalize guard;
8376 at::Tensor tmp_output = at::_ops::relu_out::call(self_, out_);
8377       return out;
8378 }
8379 } else {
8380 at::Tensor tmp_output;
8381 {
8382 at::AutoDispatchSkipFunctionalize guard;
8383 tmp_output = at::_ops::relu::call(self_);
8384 }
8385 at::functionalization::impl::replace_(out, tmp_output);
8386 at::functionalization::impl::commit_update(out);
8387 at::functionalization::impl::sync(out);
8388 return out;
8389 }
8390 }
8391
8392 at::Tensor & relu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
8393 if (true) {
8394 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
8395     // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
8396     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
8397 auto self_meta = to_meta(self);
8398 at::AutoDispatchSkipFunctionalize func_guard;
8399 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
8400 at::_ops::relu_::call(self_meta);
8401 }
8402
8403 at::Tensor self_;
8404 if (at::functionalization::impl::isFunctionalTensor(self)) {
8405 at::functionalization::impl::sync(self);
8406 self_ = at::functionalization::impl::from_functional_tensor(self);
8407 } else {
8408 self_ = self;
8409 }
8410 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
8411 if ((false)) {
8412       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
8413 TORCH_INTERNAL_ASSERT(false,
8414 "mutating a non-functional tensor with a functional tensor is not allowed.",
8415 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
8416 } else {
8417 // case 2: arguments are not functional tensors, so we no-op and redispatch.
8418 at::AutoDispatchSkipFunctionalize guard;
8419 at::Tensor tmp_output = at::_ops::relu_::call(self_);
8420       return self;
8421 }
8422 } else {
8423 at::Tensor tmp_output;
8424 {
8425 at::AutoDispatchSkipFunctionalize guard;
8426 tmp_output = at::_ops::relu::call(self_);
8427 }
8428 at::functionalization::impl::replace_(self, tmp_output);
8429 at::functionalization::impl::commit_update(self);
8430 at::functionalization::impl::sync(self);
8431 return self;
8432 }
8433 }
8434
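// Note (hand-written annotation, not produced by torchgen): relu_ above and logit_ below follow
// the same in-place recipe as rad2deg_. Under functionalization the program only ever records
// the pure relu / logit ops plus explicit write-backs, which is what lets downstream consumers
// that require functional graphs (compilers, export pipelines) avoid reasoning about aliasing
// and mutation for these ops.
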
8435 at::Tensor & logit_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<double> eps, at::Tensor & out) {
8436 if (false) {
8437 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
8438     // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
8439     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
8440 auto self_meta = to_meta(self);
8441 auto out_meta = to_meta(out);
8442 at::AutoDispatchSkipFunctionalize func_guard;
8443 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
8444 at::_ops::logit_out::call(self_meta, eps, out_meta);
8445 }
8446
8447 at::Tensor self_;
8448 if (at::functionalization::impl::isFunctionalTensor(self)) {
8449 at::functionalization::impl::sync(self);
8450 self_ = at::functionalization::impl::from_functional_tensor(self);
8451 } else {
8452 self_ = self;
8453 }
8454
8455 at::Tensor out_;
8456 if (at::functionalization::impl::isFunctionalTensor(out)) {
8457 at::functionalization::impl::sync(out);
8458 out_ = at::functionalization::impl::from_functional_tensor(out);
8459 } else {
8460 out_ = out;
8461 }
8462 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
8463 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
8464       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
8465 TORCH_INTERNAL_ASSERT(false,
8466 "mutating a non-functional tensor with a functional tensor is not allowed.",
8467 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
8468 } else {
8469 // case 2: arguments are not functional tensors, so we no-op and redispatch.
8470 at::AutoDispatchSkipFunctionalize guard;
8471 at::Tensor tmp_output = at::_ops::logit_out::call(self_, eps, out_);
8472       return out;
8473 }
8474 } else {
8475 at::Tensor tmp_output;
8476 {
8477 at::AutoDispatchSkipFunctionalize guard;
8478 tmp_output = at::_ops::logit::call(self_, eps);
8479 }
8480 at::functionalization::impl::replace_(out, tmp_output);
8481 at::functionalization::impl::commit_update(out);
8482 at::functionalization::impl::sync(out);
8483 return out;
8484 }
8485 }
8486
8487 at::Tensor & logit_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, c10::optional<double> eps) {
8488 if (true) {
8489 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
8490     // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
8491     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
8492 auto self_meta = to_meta(self);
8493 at::AutoDispatchSkipFunctionalize func_guard;
8494 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
8495 at::_ops::logit_::call(self_meta, eps);
8496 }
8497
8498 at::Tensor self_;
8499 if (at::functionalization::impl::isFunctionalTensor(self)) {
8500 at::functionalization::impl::sync(self);
8501 self_ = at::functionalization::impl::from_functional_tensor(self);
8502 } else {
8503 self_ = self;
8504 }
8505 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
8506 if ((false)) {
8507       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
8508 TORCH_INTERNAL_ASSERT(false,
8509 "mutating a non-functional tensor with a functional tensor is not allowed.",
8510 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
8511 } else {
8512 // case 2: arguments are not functional tensors, so we no-op and redispatch.
8513 at::AutoDispatchSkipFunctionalize guard;
8514 at::Tensor tmp_output = at::_ops::logit_::call(self_, eps);
8515       return self;
8516 }
8517 } else {
8518 at::Tensor tmp_output;
8519 {
8520 at::AutoDispatchSkipFunctionalize guard;
8521 tmp_output = at::_ops::logit::call(self_, eps);
8522 }
8523 at::functionalization::impl::replace_(self, tmp_output);
8524 at::functionalization::impl::commit_update(self);
8525 at::functionalization::impl::sync(self);
8526 return self;
8527 }
8528 }
8529
8530 at::Tensor & select_scatter_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index, at::Tensor & out) {
8531 if (false) {
8532 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
8533     // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
8534     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
8535 auto self_meta = to_meta(self);
8536 auto src_meta = to_meta(src);
8537 auto out_meta = to_meta(out);
8538 at::AutoDispatchSkipFunctionalize func_guard;
8539 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
8540 at::_ops::select_scatter_out::call(self_meta, src_meta, dim, index, out_meta);
8541 }
8542
8543 at::Tensor self_;
8544 if (at::functionalization::impl::isFunctionalTensor(self)) {
8545 at::functionalization::impl::sync(self);
8546 self_ = at::functionalization::impl::from_functional_tensor(self);
8547 } else {
8548 self_ = self;
8549 }
8550
8551 at::Tensor src_;
8552 if (at::functionalization::impl::isFunctionalTensor(src)) {
8553 at::functionalization::impl::sync(src);
8554 src_ = at::functionalization::impl::from_functional_tensor(src);
8555 } else {
8556 src_ = src;
8557 }
8558
8559 at::Tensor out_;
8560 if (at::functionalization::impl::isFunctionalTensor(out)) {
8561 at::functionalization::impl::sync(out);
8562 out_ = at::functionalization::impl::from_functional_tensor(out);
8563 } else {
8564 out_ = out;
8565 }
8566 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
8567 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(src))) {
8568      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
8569 TORCH_INTERNAL_ASSERT(false,
8570 "mutating a non-functional tensor with a functional tensor is not allowed.",
8571 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
8572 } else {
8573 // case 2: arguments are not functional tensors, so we no-op and redispatch.
8574 at::AutoDispatchSkipFunctionalize guard;
8575 at::Tensor tmp_output = at::_ops::select_scatter_out::call(self_, src_, dim, index, out_);
8576        return out;
8577 }
8578 } else {
8579 at::Tensor tmp_output;
8580 {
8581 at::AutoDispatchSkipFunctionalize guard;
8582 tmp_output = at::_ops::select_scatter::call(self_, src_, dim, index);
8583 }
8584 at::functionalization::impl::replace_(out, tmp_output);
8585 at::functionalization::impl::commit_update(out);
8586 at::functionalization::impl::sync(out);
8587 return out;
8588 }
8589 }
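
// -----------------------------------------------------------------------------
// [Editorial sketch, not generated] The out= kernels in this file all apply the
// same rewrite: when `out` is a functional tensor, the mutation is re-expressed
// through the purely functional overload and the result is committed into
// `out`'s wrapper; `out`'s original storage is never written. The driver below
// exercises select_scatter_out_out end-to-end under that assumption; the helper
// name and the choice to wrap every argument are assumptions of this sketch.
namespace {
[[maybe_unused]] at::Tensor example_run_select_scatter_out_functionalized(
    const at::Tensor& self,
    const at::Tensor& src,
    int64_t dim,
    c10::SymInt index,
    const at::Tensor& out) {
  at::Tensor self_f = at::functionalization::impl::to_functional_tensor(self);
  at::Tensor src_f = at::functionalization::impl::to_functional_tensor(src);
  at::Tensor out_f = at::functionalization::impl::to_functional_tensor(out);
  // Dispatch reaches select_scatter_out_out above, which calls the functional
  // at::_ops::select_scatter and stores the result in out_f's wrapper.
  at::_ops::select_scatter_out::call(self_f, src_f, dim, index, out_f);
  at::functionalization::impl::sync(out_f);
  return at::functionalization::impl::from_functional_tensor(out_f);
}
} // namespace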
8590
8591 at::Tensor & softmax_out_int_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
8592 if (false) {
8593 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
8594 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
8595    // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
8596 auto self_meta = to_meta(self);
8597 auto out_meta = to_meta(out);
8598 at::AutoDispatchSkipFunctionalize func_guard;
8599 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
8600 at::_ops::softmax_int_out::call(self_meta, dim, dtype, out_meta);
8601 }
8602
8603 at::Tensor self_;
8604 if (at::functionalization::impl::isFunctionalTensor(self)) {
8605 at::functionalization::impl::sync(self);
8606 self_ = at::functionalization::impl::from_functional_tensor(self);
8607 } else {
8608 self_ = self;
8609 }
8610
8611 at::Tensor out_;
8612 if (at::functionalization::impl::isFunctionalTensor(out)) {
8613 at::functionalization::impl::sync(out);
8614 out_ = at::functionalization::impl::from_functional_tensor(out);
8615 } else {
8616 out_ = out;
8617 }
8618 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
8619 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
8620      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
8621 TORCH_INTERNAL_ASSERT(false,
8622 "mutating a non-functional tensor with a functional tensor is not allowed.",
8623 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
8624 } else {
8625 // case 2: arguments are not functional tensors, so we no-op and redispatch.
8626 at::AutoDispatchSkipFunctionalize guard;
8627 at::Tensor tmp_output = at::_ops::softmax_int_out::call(self_, dim, dtype, out_);
8628        return out;
8629 }
8630 } else {
8631 at::Tensor tmp_output;
8632 {
8633 at::AutoDispatchSkipFunctionalize guard;
8634 tmp_output = at::_ops::softmax_int::call(self_, dim, dtype);
8635 }
8636 at::functionalization::impl::replace_(out, tmp_output);
8637 at::functionalization::impl::commit_update(out);
8638 at::functionalization::impl::sync(out);
8639 return out;
8640 }
8641 }
8642
8643 at::Tensor & stack_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim, at::Tensor & out) {
8644 if (false) {
8645 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
8646 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
8647    // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
8648 auto tensors_meta = to_meta(tensors);
8649 auto out_meta = to_meta(out);
8650 at::AutoDispatchSkipFunctionalize func_guard;
8651 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
8652 at::_ops::stack_out::call(tensors_meta, dim, out_meta);
8653 }
8654
8655 ::std::vector<at::Tensor> tensors_;
8656 if (at::functionalization::impl::isFunctionalTensor(tensors)) {
8657 at::functionalization::impl::sync(tensors);
8658 tensors_ = at::functionalization::impl::from_functional_tensor(tensors);
8659 } else {
8660 tensors_ = tensors.vec();
8661 }
8662
8663 at::Tensor out_;
8664 if (at::functionalization::impl::isFunctionalTensor(out)) {
8665 at::functionalization::impl::sync(out);
8666 out_ = at::functionalization::impl::from_functional_tensor(out);
8667 } else {
8668 out_ = out;
8669 }
8670 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
8671 if ((false || at::functionalization::impl::isFunctionalTensor(tensors))) {
8672      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
8673 TORCH_INTERNAL_ASSERT(false,
8674 "mutating a non-functional tensor with a functional tensor is not allowed.",
8675 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
8676 } else {
8677 // case 2: arguments are not functional tensors, so we no-op and redispatch.
8678 at::AutoDispatchSkipFunctionalize guard;
8679 at::Tensor tmp_output = at::_ops::stack_out::call(tensors_, dim, out_);
8680        return out;
8681 }
8682 } else {
8683 at::Tensor tmp_output;
8684 {
8685 at::AutoDispatchSkipFunctionalize guard;
8686 tmp_output = at::_ops::stack::call(tensors_, dim);
8687 }
8688 at::functionalization::impl::replace_(out, tmp_output);
8689 at::functionalization::impl::commit_update(out);
8690 at::functionalization::impl::sync(out);
8691 return out;
8692 }
8693 }
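
// -----------------------------------------------------------------------------
// [Editorial sketch, not generated] Ops taking an at::TensorList (stack, vstack)
// unwrap the whole list into a ::std::vector before redispatching. The driver
// below shows the caller-side counterpart, wrapping each list element
// individually; the helper name and the element-by-element wrapping loop are
// assumptions of this sketch.
namespace {
[[maybe_unused]] at::Tensor example_run_stack_out_functionalized(
    at::TensorList tensors, int64_t dim, const at::Tensor& out) {
  ::std::vector<at::Tensor> wrapped;
  wrapped.reserve(tensors.size());
  for (const at::Tensor& t : tensors) {
    wrapped.push_back(at::functionalization::impl::to_functional_tensor(t));
  }
  at::Tensor out_f = at::functionalization::impl::to_functional_tensor(out);
  // stack_out_out above runs the functional at::_ops::stack on the unwrapped
  // list and commits the result into out_f's wrapper.
  at::_ops::stack_out::call(wrapped, dim, out_f);
  at::functionalization::impl::sync(out_f);
  return at::functionalization::impl::from_functional_tensor(out_f);
}
} // namespace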
8694
8695 at::Tensor & vstack_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) {
8696 if (false) {
8697 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
8698 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
8699    // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
8700 auto tensors_meta = to_meta(tensors);
8701 auto out_meta = to_meta(out);
8702 at::AutoDispatchSkipFunctionalize func_guard;
8703 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
8704 at::_ops::vstack_out::call(tensors_meta, out_meta);
8705 }
8706
8707 ::std::vector<at::Tensor> tensors_;
8708 if (at::functionalization::impl::isFunctionalTensor(tensors)) {
8709 at::functionalization::impl::sync(tensors);
8710 tensors_ = at::functionalization::impl::from_functional_tensor(tensors);
8711 } else {
8712 tensors_ = tensors.vec();
8713 }
8714
8715 at::Tensor out_;
8716 if (at::functionalization::impl::isFunctionalTensor(out)) {
8717 at::functionalization::impl::sync(out);
8718 out_ = at::functionalization::impl::from_functional_tensor(out);
8719 } else {
8720 out_ = out;
8721 }
8722 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
8723 if ((false || at::functionalization::impl::isFunctionalTensor(tensors))) {
8724      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
8725 TORCH_INTERNAL_ASSERT(false,
8726 "mutating a non-functional tensor with a functional tensor is not allowed.",
8727 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
8728 } else {
8729 // case 2: arguments are not functional tensors, so we no-op and redispatch.
8730 at::AutoDispatchSkipFunctionalize guard;
8731 at::Tensor tmp_output = at::_ops::vstack_out::call(tensors_, out_);
8732        return out;
8733 }
8734 } else {
8735 at::Tensor tmp_output;
8736 {
8737 at::AutoDispatchSkipFunctionalize guard;
8738 tmp_output = at::_ops::vstack::call(tensors_);
8739 }
8740 at::functionalization::impl::replace_(out, tmp_output);
8741 at::functionalization::impl::commit_update(out);
8742 at::functionalization::impl::sync(out);
8743 return out;
8744 }
8745 }
8746
8747 at::Tensor & nansum_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
8748 if (false) {
8749 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
8750 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
8751    // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
8752 auto self_meta = to_meta(self);
8753 auto out_meta = to_meta(out);
8754 at::AutoDispatchSkipFunctionalize func_guard;
8755 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
8756 at::_ops::nansum_out::call(self_meta, dim, keepdim, dtype, out_meta);
8757 }
8758
8759 at::Tensor self_;
8760 if (at::functionalization::impl::isFunctionalTensor(self)) {
8761 at::functionalization::impl::sync(self);
8762 self_ = at::functionalization::impl::from_functional_tensor(self);
8763 } else {
8764 self_ = self;
8765 }
8766
8767 at::Tensor out_;
8768 if (at::functionalization::impl::isFunctionalTensor(out)) {
8769 at::functionalization::impl::sync(out);
8770 out_ = at::functionalization::impl::from_functional_tensor(out);
8771 } else {
8772 out_ = out;
8773 }
8774 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
8775 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
8776      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
8777 TORCH_INTERNAL_ASSERT(false,
8778 "mutating a non-functional tensor with a functional tensor is not allowed.",
8779 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
8780 } else {
8781 // case 2: arguments are not functional tensors, so we no-op and redispatch.
8782 at::AutoDispatchSkipFunctionalize guard;
8783 at::Tensor tmp_output = at::_ops::nansum_out::call(self_, dim, keepdim, dtype, out_);
8784        return out;
8785 }
8786 } else {
8787 at::Tensor tmp_output;
8788 {
8789 at::AutoDispatchSkipFunctionalize guard;
8790 tmp_output = at::_ops::nansum::call(self_, dim, keepdim, dtype);
8791 }
8792 at::functionalization::impl::replace_(out, tmp_output);
8793 at::functionalization::impl::commit_update(out);
8794 at::functionalization::impl::sync(out);
8795 return out;
8796 }
8797 }
8798
8799 at::Tensor & sqrt_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
8800 if (false) {
8801 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
8802 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
8803    // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
8804 auto self_meta = to_meta(self);
8805 auto out_meta = to_meta(out);
8806 at::AutoDispatchSkipFunctionalize func_guard;
8807 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
8808 at::_ops::sqrt_out::call(self_meta, out_meta);
8809 }
8810
8811 at::Tensor self_;
8812 if (at::functionalization::impl::isFunctionalTensor(self)) {
8813 at::functionalization::impl::sync(self);
8814 self_ = at::functionalization::impl::from_functional_tensor(self);
8815 } else {
8816 self_ = self;
8817 }
8818
8819 at::Tensor out_;
8820 if (at::functionalization::impl::isFunctionalTensor(out)) {
8821 at::functionalization::impl::sync(out);
8822 out_ = at::functionalization::impl::from_functional_tensor(out);
8823 } else {
8824 out_ = out;
8825 }
8826 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
8827 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
8828      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
8829 TORCH_INTERNAL_ASSERT(false,
8830 "mutating a non-functional tensor with a functional tensor is not allowed.",
8831 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
8832 } else {
8833 // case 2: arguments are not functional tensors, so we no-op and redispatch.
8834 at::AutoDispatchSkipFunctionalize guard;
8835 at::Tensor tmp_output = at::_ops::sqrt_out::call(self_, out_);
8836        return out;
8837 }
8838 } else {
8839 at::Tensor tmp_output;
8840 {
8841 at::AutoDispatchSkipFunctionalize guard;
8842 tmp_output = at::_ops::sqrt::call(self_);
8843 }
8844 at::functionalization::impl::replace_(out, tmp_output);
8845 at::functionalization::impl::commit_update(out);
8846 at::functionalization::impl::sync(out);
8847 return out;
8848 }
8849 }
8850
8851 at::Tensor & sqrt_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
8852 if (true) {
8853 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
8854 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
8855    // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
8856 auto self_meta = to_meta(self);
8857 at::AutoDispatchSkipFunctionalize func_guard;
8858 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
8859 at::_ops::sqrt_::call(self_meta);
8860 }
8861
8862 at::Tensor self_;
8863 if (at::functionalization::impl::isFunctionalTensor(self)) {
8864 at::functionalization::impl::sync(self);
8865 self_ = at::functionalization::impl::from_functional_tensor(self);
8866 } else {
8867 self_ = self;
8868 }
8869 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
8870 if ((false)) {
8871      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
8872 TORCH_INTERNAL_ASSERT(false,
8873 "mutating a non-functional tensor with a functional tensor is not allowed.",
8874 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
8875 } else {
8876 // case 2: arguments are not functional tensors, so we no-op and redispatch.
8877 at::AutoDispatchSkipFunctionalize guard;
8878 at::Tensor tmp_output = at::_ops::sqrt_::call(self_);
8879        return self;
8880 }
8881 } else {
8882 at::Tensor tmp_output;
8883 {
8884 at::AutoDispatchSkipFunctionalize guard;
8885 tmp_output = at::_ops::sqrt::call(self_);
8886 }
8887 at::functionalization::impl::replace_(self, tmp_output);
8888 at::functionalization::impl::commit_update(self);
8889 at::functionalization::impl::sync(self);
8890 return self;
8891 }
8892 }
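
// -----------------------------------------------------------------------------
// [Editorial sketch, not generated] When none of the mutated arguments are
// functional tensors, every kernel in this file takes its "case 2" branch: it
// skips functionalization and simply redispatches to the regular mutating
// implementation. The helper below illustrates that passthrough for sqrt_ by
// forcing dispatch through the Functionalize key on a plain tensor; the helper
// name and the use of IncludeDispatchKeyGuard here are assumptions of this
// sketch.
namespace {
[[maybe_unused]] at::Tensor& example_sqrt__passthrough(at::Tensor& self) {
  // Include the Functionalize key in the thread-local dispatch state so the
  // sqrt_ kernel above is selected even though `self` is not wrapped.
  c10::impl::IncludeDispatchKeyGuard functionalize_guard(c10::DispatchKey::Functionalize);
  // Inside the kernel, case 2 redispatches to the native sqrt_: `self` is
  // mutated in place and the same tensor reference is returned.
  return at::_ops::sqrt_::call(self);
}
} // namespace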
8893
8894 at::Tensor & prod_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
8895 if (false) {
8896 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
8897 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
8898    // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
8899 auto self_meta = to_meta(self);
8900 auto out_meta = to_meta(out);
8901 at::AutoDispatchSkipFunctionalize func_guard;
8902 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
8903 at::_ops::prod_out::call(self_meta, dtype, out_meta);
8904 }
8905
8906 at::Tensor self_;
8907 if (at::functionalization::impl::isFunctionalTensor(self)) {
8908 at::functionalization::impl::sync(self);
8909 self_ = at::functionalization::impl::from_functional_tensor(self);
8910 } else {
8911 self_ = self;
8912 }
8913
8914 at::Tensor out_;
8915 if (at::functionalization::impl::isFunctionalTensor(out)) {
8916 at::functionalization::impl::sync(out);
8917 out_ = at::functionalization::impl::from_functional_tensor(out);
8918 } else {
8919 out_ = out;
8920 }
8921 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
8922 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
8923      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
8924 TORCH_INTERNAL_ASSERT(false,
8925 "mutating a non-functional tensor with a functional tensor is not allowed.",
8926 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
8927 } else {
8928 // case 2: arguments are not functional tensors, so we no-op and redispatch.
8929 at::AutoDispatchSkipFunctionalize guard;
8930 at::Tensor tmp_output = at::_ops::prod_out::call(self_, dtype, out_);
8931        return out;
8932 }
8933 } else {
8934 at::Tensor tmp_output;
8935 {
8936 at::AutoDispatchSkipFunctionalize guard;
8937 tmp_output = at::_ops::prod::call(self_, dtype);
8938 }
8939 at::functionalization::impl::replace_(out, tmp_output);
8940 at::functionalization::impl::commit_update(out);
8941 at::functionalization::impl::sync(out);
8942 return out;
8943 }
8944 }
8945
8946 at::Tensor & prod_out_int_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
8947 if (false) {
8948 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
8949 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
8950    // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
8951 auto self_meta = to_meta(self);
8952 auto out_meta = to_meta(out);
8953 at::AutoDispatchSkipFunctionalize func_guard;
8954 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
8955 at::_ops::prod_int_out::call(self_meta, dim, keepdim, dtype, out_meta);
8956 }
8957
8958 at::Tensor self_;
8959 if (at::functionalization::impl::isFunctionalTensor(self)) {
8960 at::functionalization::impl::sync(self);
8961 self_ = at::functionalization::impl::from_functional_tensor(self);
8962 } else {
8963 self_ = self;
8964 }
8965
8966 at::Tensor out_;
8967 if (at::functionalization::impl::isFunctionalTensor(out)) {
8968 at::functionalization::impl::sync(out);
8969 out_ = at::functionalization::impl::from_functional_tensor(out);
8970 } else {
8971 out_ = out;
8972 }
8973 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
8974 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
8975      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
8976 TORCH_INTERNAL_ASSERT(false,
8977 "mutating a non-functional tensor with a functional tensor is not allowed.",
8978 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
8979 } else {
8980 // case 2: arguments are not functional tensors, so we no-op and redispatch.
8981 at::AutoDispatchSkipFunctionalize guard;
8982 at::Tensor tmp_output = at::_ops::prod_int_out::call(self_, dim, keepdim, dtype, out_);
8983        return out;
8984 }
8985 } else {
8986 at::Tensor tmp_output;
8987 {
8988 at::AutoDispatchSkipFunctionalize guard;
8989 tmp_output = at::_ops::prod_dim_int::call(self_, dim, keepdim, dtype);
8990 }
8991 at::functionalization::impl::replace_(out, tmp_output);
8992 at::functionalization::impl::commit_update(out);
8993 at::functionalization::impl::sync(out);
8994 return out;
8995 }
8996 }
8997
8998 at::Tensor & prod_out_Dimname_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
8999 if (false) {
9000 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
9001 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
9002    // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
9003 auto self_meta = to_meta(self);
9004 auto out_meta = to_meta(out);
9005 at::AutoDispatchSkipFunctionalize func_guard;
9006 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
9007 at::_ops::prod_Dimname_out::call(self_meta, dim, keepdim, dtype, out_meta);
9008 }
9009
9010 at::Tensor self_;
9011 if (at::functionalization::impl::isFunctionalTensor(self)) {
9012 at::functionalization::impl::sync(self);
9013 self_ = at::functionalization::impl::from_functional_tensor(self);
9014 } else {
9015 self_ = self;
9016 }
9017
9018 at::Tensor out_;
9019 if (at::functionalization::impl::isFunctionalTensor(out)) {
9020 at::functionalization::impl::sync(out);
9021 out_ = at::functionalization::impl::from_functional_tensor(out);
9022 } else {
9023 out_ = out;
9024 }
9025 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
9026 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
9027      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9028 TORCH_INTERNAL_ASSERT(false,
9029 "mutating a non-functional tensor with a functional tensor is not allowed.",
9030 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
9031 } else {
9032 // case 2: arguments are not functional tensors, so we no-op and redispatch.
9033 at::AutoDispatchSkipFunctionalize guard;
9034 at::Tensor tmp_output = at::_ops::prod_Dimname_out::call(self_, dim, keepdim, dtype, out_);
9035        return out;
9036 }
9037 } else {
9038 at::Tensor tmp_output;
9039 {
9040 at::AutoDispatchSkipFunctionalize guard;
9041 tmp_output = at::_ops::prod_dim_Dimname::call(self_, dim, keepdim, dtype);
9042 }
9043 at::functionalization::impl::replace_(out, tmp_output);
9044 at::functionalization::impl::commit_update(out);
9045 at::functionalization::impl::sync(out);
9046 return out;
9047 }
9048 }
9049
9050 at::Tensor & threshold_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input) {
9051 if (false) {
9052 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
9053 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
9054    // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
9055 auto grad_output_meta = to_meta(grad_output);
9056 auto self_meta = to_meta(self);
9057 auto grad_input_meta = to_meta(grad_input);
9058 at::AutoDispatchSkipFunctionalize func_guard;
9059 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
9060 at::_ops::threshold_backward_grad_input::call(grad_output_meta, self_meta, threshold, grad_input_meta);
9061 }
9062
9063 at::Tensor grad_output_;
9064 if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
9065 at::functionalization::impl::sync(grad_output);
9066 grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
9067 } else {
9068 grad_output_ = grad_output;
9069 }
9070
9071 at::Tensor self_;
9072 if (at::functionalization::impl::isFunctionalTensor(self)) {
9073 at::functionalization::impl::sync(self);
9074 self_ = at::functionalization::impl::from_functional_tensor(self);
9075 } else {
9076 self_ = self;
9077 }
9078
9079 at::Tensor grad_input_;
9080 if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
9081 at::functionalization::impl::sync(grad_input);
9082 grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
9083 } else {
9084 grad_input_ = grad_input;
9085 }
9086 if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
9087 if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) {
9088      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9089 TORCH_INTERNAL_ASSERT(false,
9090 "mutating a non-functional tensor with a functional tensor is not allowed.",
9091 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
9092 } else {
9093 // case 2: arguments are not functional tensors, so we no-op and redispatch.
9094 at::AutoDispatchSkipFunctionalize guard;
9095 at::Tensor tmp_output = at::_ops::threshold_backward_grad_input::call(grad_output_, self_, threshold, grad_input_);
9096        return grad_input;
9097 }
9098 } else {
9099 at::Tensor tmp_output;
9100 {
9101 at::AutoDispatchSkipFunctionalize guard;
9102 tmp_output = at::_ops::threshold_backward::call(grad_output_, self_, threshold);
9103 }
9104 at::functionalization::impl::replace_(grad_input, tmp_output);
9105 at::functionalization::impl::commit_update(grad_input);
9106 at::functionalization::impl::sync(grad_input);
9107 return grad_input;
9108 }
9109 }
9110
9111 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _transform_bias_rescale_qkv_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
9112 if (false) {
9113 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
9114 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
9115    // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
9116 auto qkv_meta = to_meta(qkv);
9117 auto qkv_bias_meta = to_meta(qkv_bias);
9118 auto out0_meta = to_meta(out0);
9119 auto out1_meta = to_meta(out1);
9120 auto out2_meta = to_meta(out2);
9121 at::AutoDispatchSkipFunctionalize func_guard;
9122 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
9123 at::_ops::_transform_bias_rescale_qkv_out::call(qkv_meta, qkv_bias_meta, num_heads, out0_meta, out1_meta, out2_meta);
9124 }
9125
9126 at::Tensor qkv_;
9127 if (at::functionalization::impl::isFunctionalTensor(qkv)) {
9128 at::functionalization::impl::sync(qkv);
9129 qkv_ = at::functionalization::impl::from_functional_tensor(qkv);
9130 } else {
9131 qkv_ = qkv;
9132 }
9133
9134 at::Tensor qkv_bias_;
9135 if (at::functionalization::impl::isFunctionalTensor(qkv_bias)) {
9136 at::functionalization::impl::sync(qkv_bias);
9137 qkv_bias_ = at::functionalization::impl::from_functional_tensor(qkv_bias);
9138 } else {
9139 qkv_bias_ = qkv_bias;
9140 }
9141
9142 at::Tensor out0_;
9143 if (at::functionalization::impl::isFunctionalTensor(out0)) {
9144 at::functionalization::impl::sync(out0);
9145 out0_ = at::functionalization::impl::from_functional_tensor(out0);
9146 } else {
9147 out0_ = out0;
9148 }
9149
9150 at::Tensor out1_;
9151 if (at::functionalization::impl::isFunctionalTensor(out1)) {
9152 at::functionalization::impl::sync(out1);
9153 out1_ = at::functionalization::impl::from_functional_tensor(out1);
9154 } else {
9155 out1_ = out1;
9156 }
9157
9158 at::Tensor out2_;
9159 if (at::functionalization::impl::isFunctionalTensor(out2)) {
9160 at::functionalization::impl::sync(out2);
9161 out2_ = at::functionalization::impl::from_functional_tensor(out2);
9162 } else {
9163 out2_ = out2;
9164 }
9165 if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
9166 if ((false || at::functionalization::impl::isFunctionalTensor(qkv) || at::functionalization::impl::isFunctionalTensor(qkv_bias))) {
9167      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9168 TORCH_INTERNAL_ASSERT(false,
9169 "mutating a non-functional tensor with a functional tensor is not allowed.",
9170 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
9171 } else {
9172 // case 2: arguments are not functional tensors, so we no-op and redispatch.
9173 at::AutoDispatchSkipFunctionalize guard;
9174 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_transform_bias_rescale_qkv_out::call(qkv_, qkv_bias_, num_heads, out0_, out1_, out2_);
9175        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
9176 }
9177 } else {
9178 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
9179 {
9180 at::AutoDispatchSkipFunctionalize guard;
9181 tmp_output = at::_ops::_transform_bias_rescale_qkv::call(qkv_, qkv_bias_, num_heads);
9182 }
9183 at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
9184 at::functionalization::impl::commit_update(out0);
9185 at::functionalization::impl::sync(out0);
9186 at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
9187 at::functionalization::impl::commit_update(out1);
9188 at::functionalization::impl::sync(out1);
9189 at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
9190 at::functionalization::impl::commit_update(out2);
9191 at::functionalization::impl::sync(out2);
9192 return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
9193 }
9194 }
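
// -----------------------------------------------------------------------------
// [Editorial sketch, not generated] Multi-output out= kernels such as
// _transform_bias_rescale_qkv_out_out above (and unique_dim_consecutive_out_out
// and _weight_norm_interface_backward_out_out below) run the functional op once
// and then commit one result per out tensor. The driver below sketches that
// path; its name and the decision to wrap every argument are assumptions of
// this sketch.
namespace {
[[maybe_unused]] ::std::tuple<at::Tensor, at::Tensor, at::Tensor>
example_run_transform_bias_rescale_qkv_functionalized(
    const at::Tensor& qkv,
    const at::Tensor& qkv_bias,
    int64_t num_heads,
    const at::Tensor& out0,
    const at::Tensor& out1,
    const at::Tensor& out2) {
  at::Tensor qkv_f = at::functionalization::impl::to_functional_tensor(qkv);
  at::Tensor bias_f = at::functionalization::impl::to_functional_tensor(qkv_bias);
  at::Tensor out0_f = at::functionalization::impl::to_functional_tensor(out0);
  at::Tensor out1_f = at::functionalization::impl::to_functional_tensor(out1);
  at::Tensor out2_f = at::functionalization::impl::to_functional_tensor(out2);
  // The kernel above calls the functional _transform_bias_rescale_qkv once and
  // then replaces/commits/syncs each of the three output wrappers in turn.
  at::_ops::_transform_bias_rescale_qkv_out::call(
      qkv_f, bias_f, num_heads, out0_f, out1_f, out2_f);
  for (const at::Tensor& t : {out0_f, out1_f, out2_f}) {
    at::functionalization::impl::sync(t);
  }
  return ::std::make_tuple(
      at::functionalization::impl::from_functional_tensor(out0_f),
      at::functionalization::impl::from_functional_tensor(out1_f),
      at::functionalization::impl::from_functional_tensor(out2_f));
}
} // namespace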
9195
9196 at::Tensor & _nested_from_padded_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213, at::Tensor & out) {
9197 if (false) {
9198 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
9199 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
9200    // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
9201 auto padded_meta = to_meta(padded);
9202 auto cpu_nested_shape_example_meta = to_meta(cpu_nested_shape_example);
9203 auto out_meta = to_meta(out);
9204 at::AutoDispatchSkipFunctionalize func_guard;
9205 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
9206 at::_ops::_nested_from_padded_out::call(padded_meta, cpu_nested_shape_example_meta, fuse_transform_0213, out_meta);
9207 }
9208
9209 at::Tensor padded_;
9210 if (at::functionalization::impl::isFunctionalTensor(padded)) {
9211 at::functionalization::impl::sync(padded);
9212 padded_ = at::functionalization::impl::from_functional_tensor(padded);
9213 } else {
9214 padded_ = padded;
9215 }
9216
9217 at::Tensor cpu_nested_shape_example_;
9218 if (at::functionalization::impl::isFunctionalTensor(cpu_nested_shape_example)) {
9219 at::functionalization::impl::sync(cpu_nested_shape_example);
9220 cpu_nested_shape_example_ = at::functionalization::impl::from_functional_tensor(cpu_nested_shape_example);
9221 } else {
9222 cpu_nested_shape_example_ = cpu_nested_shape_example;
9223 }
9224
9225 at::Tensor out_;
9226 if (at::functionalization::impl::isFunctionalTensor(out)) {
9227 at::functionalization::impl::sync(out);
9228 out_ = at::functionalization::impl::from_functional_tensor(out);
9229 } else {
9230 out_ = out;
9231 }
9232 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
9233 if ((false || at::functionalization::impl::isFunctionalTensor(padded) || at::functionalization::impl::isFunctionalTensor(cpu_nested_shape_example))) {
9234      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9235 TORCH_INTERNAL_ASSERT(false,
9236 "mutating a non-functional tensor with a functional tensor is not allowed.",
9237 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
9238 } else {
9239 // case 2: arguments are not functional tensors, so we no-op and redispatch.
9240 at::AutoDispatchSkipFunctionalize guard;
9241 at::Tensor tmp_output = at::_ops::_nested_from_padded_out::call(padded_, cpu_nested_shape_example_, fuse_transform_0213, out_);
9242        return out;
9243 }
9244 } else {
9245 at::Tensor tmp_output;
9246 {
9247 at::AutoDispatchSkipFunctionalize guard;
9248 tmp_output = at::_ops::_nested_from_padded::call(padded_, cpu_nested_shape_example_, fuse_transform_0213);
9249 }
9250 at::functionalization::impl::replace_(out, tmp_output);
9251 at::functionalization::impl::commit_update(out);
9252 at::functionalization::impl::sync(out);
9253 return out;
9254 }
9255 }
9256
9257 at::Tensor & _nested_tensor_size_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
9258 if (false) {
9259 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
9260 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
9261    // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
9262 auto self_meta = to_meta(self);
9263 auto out_meta = to_meta(out);
9264 at::AutoDispatchSkipFunctionalize func_guard;
9265 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
9266 at::_ops::_nested_tensor_size_out::call(self_meta, out_meta);
9267 }
9268
9269 at::Tensor self_;
9270 if (at::functionalization::impl::isFunctionalTensor(self)) {
9271 at::functionalization::impl::sync(self);
9272 self_ = at::functionalization::impl::from_functional_tensor(self);
9273 } else {
9274 self_ = self;
9275 }
9276
9277 at::Tensor out_;
9278 if (at::functionalization::impl::isFunctionalTensor(out)) {
9279 at::functionalization::impl::sync(out);
9280 out_ = at::functionalization::impl::from_functional_tensor(out);
9281 } else {
9282 out_ = out;
9283 }
9284 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
9285 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
9286      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9287 TORCH_INTERNAL_ASSERT(false,
9288 "mutating a non-functional tensor with a functional tensor is not allowed.",
9289 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
9290 } else {
9291 // case 2: arguments are not functional tensors, so we no-op and redispatch.
9292 at::AutoDispatchSkipFunctionalize guard;
9293 at::Tensor tmp_output = at::_ops::_nested_tensor_size_out::call(self_, out_);
9294        return out;
9295 }
9296 } else {
9297 at::Tensor tmp_output;
9298 {
9299 at::AutoDispatchSkipFunctionalize guard;
9300 tmp_output = at::_ops::_nested_tensor_size::call(self_);
9301 }
9302 at::functionalization::impl::replace_(out, tmp_output);
9303 at::functionalization::impl::commit_update(out);
9304 at::functionalization::impl::sync(out);
9305 return out;
9306 }
9307 }
9308
9309 at::Tensor & _nested_view_from_buffer_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, at::IntArrayRef offsets, at::Tensor & out) {
9310 if (false) {
9311 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
9312 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
9313    // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
9314 auto self_meta = to_meta(self);
9315 auto nested_size_meta = to_meta(nested_size);
9316 auto nested_strides_meta = to_meta(nested_strides);
9317 auto out_meta = to_meta(out);
9318 at::AutoDispatchSkipFunctionalize func_guard;
9319 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
9320 at::_ops::_nested_view_from_buffer_copy_out::call(self_meta, nested_size_meta, nested_strides_meta, offsets, out_meta);
9321 }
9322
9323 at::Tensor self_;
9324 if (at::functionalization::impl::isFunctionalTensor(self)) {
9325 at::functionalization::impl::sync(self);
9326 self_ = at::functionalization::impl::from_functional_tensor(self);
9327 } else {
9328 self_ = self;
9329 }
9330
9331 at::Tensor nested_size_;
9332 if (at::functionalization::impl::isFunctionalTensor(nested_size)) {
9333 at::functionalization::impl::sync(nested_size);
9334 nested_size_ = at::functionalization::impl::from_functional_tensor(nested_size);
9335 } else {
9336 nested_size_ = nested_size;
9337 }
9338
9339 at::Tensor nested_strides_;
9340 if (at::functionalization::impl::isFunctionalTensor(nested_strides)) {
9341 at::functionalization::impl::sync(nested_strides);
9342 nested_strides_ = at::functionalization::impl::from_functional_tensor(nested_strides);
9343 } else {
9344 nested_strides_ = nested_strides;
9345 }
9346
9347 at::Tensor out_;
9348 if (at::functionalization::impl::isFunctionalTensor(out)) {
9349 at::functionalization::impl::sync(out);
9350 out_ = at::functionalization::impl::from_functional_tensor(out);
9351 } else {
9352 out_ = out;
9353 }
9354 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
9355 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(nested_size) || at::functionalization::impl::isFunctionalTensor(nested_strides))) {
9356      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9357 TORCH_INTERNAL_ASSERT(false,
9358 "mutating a non-functional tensor with a functional tensor is not allowed.",
9359 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
9360 } else {
9361 // case 2: arguments are not functional tensors, so we no-op and redispatch.
9362 at::AutoDispatchSkipFunctionalize guard;
9363 at::Tensor tmp_output = at::_ops::_nested_view_from_buffer_copy_out::call(self_, nested_size_, nested_strides_, offsets, out_);
9364        return out;
9365 }
9366 } else {
9367 at::Tensor tmp_output;
9368 {
9369 at::AutoDispatchSkipFunctionalize guard;
9370 tmp_output = at::_ops::_nested_view_from_buffer_copy::call(self_, nested_size_, nested_strides_, offsets);
9371 }
9372 at::functionalization::impl::replace_(out, tmp_output);
9373 at::functionalization::impl::commit_update(out);
9374 at::functionalization::impl::sync(out);
9375 return out;
9376 }
9377 }
9378
9379 at::Tensor & trunc_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
9380 if (false) {
9381 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
9382 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
9383    // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
9384 auto self_meta = to_meta(self);
9385 auto out_meta = to_meta(out);
9386 at::AutoDispatchSkipFunctionalize func_guard;
9387 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
9388 at::_ops::trunc_out::call(self_meta, out_meta);
9389 }
9390
9391 at::Tensor self_;
9392 if (at::functionalization::impl::isFunctionalTensor(self)) {
9393 at::functionalization::impl::sync(self);
9394 self_ = at::functionalization::impl::from_functional_tensor(self);
9395 } else {
9396 self_ = self;
9397 }
9398
9399 at::Tensor out_;
9400 if (at::functionalization::impl::isFunctionalTensor(out)) {
9401 at::functionalization::impl::sync(out);
9402 out_ = at::functionalization::impl::from_functional_tensor(out);
9403 } else {
9404 out_ = out;
9405 }
9406 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
9407 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
9408      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9409 TORCH_INTERNAL_ASSERT(false,
9410 "mutating a non-functional tensor with a functional tensor is not allowed.",
9411 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
9412 } else {
9413 // case 2: arguments are not functional tensors, so we no-op and redispatch.
9414 at::AutoDispatchSkipFunctionalize guard;
9415 at::Tensor tmp_output = at::_ops::trunc_out::call(self_, out_);
9416        return out;
9417 }
9418 } else {
9419 at::Tensor tmp_output;
9420 {
9421 at::AutoDispatchSkipFunctionalize guard;
9422 tmp_output = at::_ops::trunc::call(self_);
9423 }
9424 at::functionalization::impl::replace_(out, tmp_output);
9425 at::functionalization::impl::commit_update(out);
9426 at::functionalization::impl::sync(out);
9427 return out;
9428 }
9429 }
9430
9431 at::Tensor & trunc_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
9432 if (true) {
9433 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
9434 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
9435    // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
9436 auto self_meta = to_meta(self);
9437 at::AutoDispatchSkipFunctionalize func_guard;
9438 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
9439 at::_ops::trunc_::call(self_meta);
9440 }
9441
9442 at::Tensor self_;
9443 if (at::functionalization::impl::isFunctionalTensor(self)) {
9444 at::functionalization::impl::sync(self);
9445 self_ = at::functionalization::impl::from_functional_tensor(self);
9446 } else {
9447 self_ = self;
9448 }
9449 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
9450 if ((false)) {
9451      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9452 TORCH_INTERNAL_ASSERT(false,
9453 "mutating a non-functional tensor with a functional tensor is not allowed.",
9454 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
9455 } else {
9456 // case 2: arguments are not functional tensors, so we no-op and redispatch.
9457 at::AutoDispatchSkipFunctionalize guard;
9458 at::Tensor tmp_output = at::_ops::trunc_::call(self_);
9459        return self;
9460 }
9461 } else {
9462 at::Tensor tmp_output;
9463 {
9464 at::AutoDispatchSkipFunctionalize guard;
9465 tmp_output = at::_ops::trunc::call(self_);
9466 }
9467 at::functionalization::impl::replace_(self, tmp_output);
9468 at::functionalization::impl::commit_update(self);
9469 at::functionalization::impl::sync(self);
9470 return self;
9471 }
9472 }
9473
9474 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_dim_consecutive_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
9475 if (false) {
9476 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
9477 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
9478    // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
9479 auto self_meta = to_meta(self);
9480 auto out0_meta = to_meta(out0);
9481 auto out1_meta = to_meta(out1);
9482 auto out2_meta = to_meta(out2);
9483 at::AutoDispatchSkipFunctionalize func_guard;
9484 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
9485 at::_ops::unique_dim_consecutive_out::call(self_meta, dim, return_inverse, return_counts, out0_meta, out1_meta, out2_meta);
9486 }
9487
9488 at::Tensor self_;
9489 if (at::functionalization::impl::isFunctionalTensor(self)) {
9490 at::functionalization::impl::sync(self);
9491 self_ = at::functionalization::impl::from_functional_tensor(self);
9492 } else {
9493 self_ = self;
9494 }
9495
9496 at::Tensor out0_;
9497 if (at::functionalization::impl::isFunctionalTensor(out0)) {
9498 at::functionalization::impl::sync(out0);
9499 out0_ = at::functionalization::impl::from_functional_tensor(out0);
9500 } else {
9501 out0_ = out0;
9502 }
9503
9504 at::Tensor out1_;
9505 if (at::functionalization::impl::isFunctionalTensor(out1)) {
9506 at::functionalization::impl::sync(out1);
9507 out1_ = at::functionalization::impl::from_functional_tensor(out1);
9508 } else {
9509 out1_ = out1;
9510 }
9511
9512 at::Tensor out2_;
9513 if (at::functionalization::impl::isFunctionalTensor(out2)) {
9514 at::functionalization::impl::sync(out2);
9515 out2_ = at::functionalization::impl::from_functional_tensor(out2);
9516 } else {
9517 out2_ = out2;
9518 }
9519 if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
9520 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
9521      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9522 TORCH_INTERNAL_ASSERT(false,
9523 "mutating a non-functional tensor with a functional tensor is not allowed.",
9524 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
9525 } else {
9526 // case 2: arguments are not functional tensors, so we no-op and redispatch.
9527 at::AutoDispatchSkipFunctionalize guard;
9528 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::unique_dim_consecutive_out::call(self_, dim, return_inverse, return_counts, out0_, out1_, out2_);
9529        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
9530 }
9531 } else {
9532 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
9533 {
9534 at::AutoDispatchSkipFunctionalize guard;
9535 tmp_output = at::_ops::unique_dim_consecutive::call(self_, dim, return_inverse, return_counts);
9536 }
9537 at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
9538 at::functionalization::impl::commit_update(out0);
9539 at::functionalization::impl::sync(out0);
9540 at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
9541 at::functionalization::impl::commit_update(out1);
9542 at::functionalization::impl::sync(out1);
9543 at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
9544 at::functionalization::impl::commit_update(out2);
9545 at::functionalization::impl::sync(out2);
9546 return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
9547 }
9548 }
9549
9550 at::Tensor & where_out_self_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
9551 if (false) {
9552 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
9553 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
9554    // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
9555 auto condition_meta = to_meta(condition);
9556 auto self_meta = to_meta(self);
9557 auto other_meta = to_meta(other);
9558 auto out_meta = to_meta(out);
9559 at::AutoDispatchSkipFunctionalize func_guard;
9560 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
9561 at::_ops::where_self_out::call(condition_meta, self_meta, other_meta, out_meta);
9562 }
9563
9564 at::Tensor condition_;
9565 if (at::functionalization::impl::isFunctionalTensor(condition)) {
9566 at::functionalization::impl::sync(condition);
9567 condition_ = at::functionalization::impl::from_functional_tensor(condition);
9568 } else {
9569 condition_ = condition;
9570 }
9571
9572 at::Tensor self_;
9573 if (at::functionalization::impl::isFunctionalTensor(self)) {
9574 at::functionalization::impl::sync(self);
9575 self_ = at::functionalization::impl::from_functional_tensor(self);
9576 } else {
9577 self_ = self;
9578 }
9579
9580 at::Tensor other_;
9581 if (at::functionalization::impl::isFunctionalTensor(other)) {
9582 at::functionalization::impl::sync(other);
9583 other_ = at::functionalization::impl::from_functional_tensor(other);
9584 } else {
9585 other_ = other;
9586 }
9587
9588 at::Tensor out_;
9589 if (at::functionalization::impl::isFunctionalTensor(out)) {
9590 at::functionalization::impl::sync(out);
9591 out_ = at::functionalization::impl::from_functional_tensor(out);
9592 } else {
9593 out_ = out;
9594 }
9595 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
9596 if ((false || at::functionalization::impl::isFunctionalTensor(condition) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
9597      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9598 TORCH_INTERNAL_ASSERT(false,
9599 "mutating a non-functional tensor with a functional tensor is not allowed.",
9600 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
9601 } else {
9602 // case 2: arguments are not functional tensors, so we no-op and redispatch.
9603 at::AutoDispatchSkipFunctionalize guard;
9604 at::Tensor tmp_output = at::_ops::where_self_out::call(condition_, self_, other_, out_);
9605        return out;
9606 }
9607 } else {
9608 at::Tensor tmp_output;
9609 {
9610 at::AutoDispatchSkipFunctionalize guard;
9611 tmp_output = at::_ops::where_self::call(condition_, self_, other_);
9612 }
9613 at::functionalization::impl::replace_(out, tmp_output);
9614 at::functionalization::impl::commit_update(out);
9615 at::functionalization::impl::sync(out);
9616 return out;
9617 }
9618 }
9619
9620 ::std::tuple<at::Tensor &,at::Tensor &> _weight_norm_interface_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim, at::Tensor & out0, at::Tensor & out1) {
9621 if (false) {
9622 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
9623 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
9624    // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
9625 auto grad_w_meta = to_meta(grad_w);
9626 auto saved_v_meta = to_meta(saved_v);
9627 auto saved_g_meta = to_meta(saved_g);
9628 auto saved_norms_meta = to_meta(saved_norms);
9629 auto out0_meta = to_meta(out0);
9630 auto out1_meta = to_meta(out1);
9631 at::AutoDispatchSkipFunctionalize func_guard;
9632 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
9633 at::_ops::_weight_norm_interface_backward_out::call(grad_w_meta, saved_v_meta, saved_g_meta, saved_norms_meta, dim, out0_meta, out1_meta);
9634 }
9635
9636 at::Tensor grad_w_;
9637 if (at::functionalization::impl::isFunctionalTensor(grad_w)) {
9638 at::functionalization::impl::sync(grad_w);
9639 grad_w_ = at::functionalization::impl::from_functional_tensor(grad_w);
9640 } else {
9641 grad_w_ = grad_w;
9642 }
9643
9644 at::Tensor saved_v_;
9645 if (at::functionalization::impl::isFunctionalTensor(saved_v)) {
9646 at::functionalization::impl::sync(saved_v);
9647 saved_v_ = at::functionalization::impl::from_functional_tensor(saved_v);
9648 } else {
9649 saved_v_ = saved_v;
9650 }
9651
9652 at::Tensor saved_g_;
9653 if (at::functionalization::impl::isFunctionalTensor(saved_g)) {
9654 at::functionalization::impl::sync(saved_g);
9655 saved_g_ = at::functionalization::impl::from_functional_tensor(saved_g);
9656 } else {
9657 saved_g_ = saved_g;
9658 }
9659
9660 at::Tensor saved_norms_;
9661 if (at::functionalization::impl::isFunctionalTensor(saved_norms)) {
9662 at::functionalization::impl::sync(saved_norms);
9663 saved_norms_ = at::functionalization::impl::from_functional_tensor(saved_norms);
9664 } else {
9665 saved_norms_ = saved_norms;
9666 }
9667
9668 at::Tensor out0_;
9669 if (at::functionalization::impl::isFunctionalTensor(out0)) {
9670 at::functionalization::impl::sync(out0);
9671 out0_ = at::functionalization::impl::from_functional_tensor(out0);
9672 } else {
9673 out0_ = out0;
9674 }
9675
9676 at::Tensor out1_;
9677 if (at::functionalization::impl::isFunctionalTensor(out1)) {
9678 at::functionalization::impl::sync(out1);
9679 out1_ = at::functionalization::impl::from_functional_tensor(out1);
9680 } else {
9681 out1_ = out1;
9682 }
9683 if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
9684 if ((false || at::functionalization::impl::isFunctionalTensor(grad_w) || at::functionalization::impl::isFunctionalTensor(saved_v) || at::functionalization::impl::isFunctionalTensor(saved_g) || at::functionalization::impl::isFunctionalTensor(saved_norms))) {
9685      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9686 TORCH_INTERNAL_ASSERT(false,
9687 "mutating a non-functional tensor with a functional tensor is not allowed.",
9688 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
9689 } else {
9690 // case 2: arguments are not functional tensors, so we no-op and redispatch.
9691 at::AutoDispatchSkipFunctionalize guard;
9692 ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_weight_norm_interface_backward_out::call(grad_w_, saved_v_, saved_g_, saved_norms_, dim, out0_, out1_);
9693        return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
9694 }
9695 } else {
9696 ::std::tuple<at::Tensor,at::Tensor> tmp_output;
9697 {
9698 at::AutoDispatchSkipFunctionalize guard;
9699 tmp_output = at::_ops::_weight_norm_interface_backward::call(grad_w_, saved_v_, saved_g_, saved_norms_, dim);
9700 }
9701 at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
9702 at::functionalization::impl::commit_update(out0);
9703 at::functionalization::impl::sync(out0);
9704 at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
9705 at::functionalization::impl::commit_update(out1);
9706 at::functionalization::impl::sync(out1);
9707 return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
9708 }
9709 }
9710
9711 at::Tensor & _sample_dirichlet_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) {
9712 if (false) {
9713 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
9714 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
9715    // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
9716 auto self_meta = to_meta(self);
9717 auto out_meta = to_meta(out);
9718 at::AutoDispatchSkipFunctionalize func_guard;
9719 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
9720 at::_ops::_sample_dirichlet_out::call(self_meta, generator, out_meta);
9721 }
9722
9723 at::Tensor self_;
9724 if (at::functionalization::impl::isFunctionalTensor(self)) {
9725 at::functionalization::impl::sync(self);
9726 self_ = at::functionalization::impl::from_functional_tensor(self);
9727 } else {
9728 self_ = self;
9729 }
9730
9731 at::Tensor out_;
9732 if (at::functionalization::impl::isFunctionalTensor(out)) {
9733 at::functionalization::impl::sync(out);
9734 out_ = at::functionalization::impl::from_functional_tensor(out);
9735 } else {
9736 out_ = out;
9737 }
9738 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
9739 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
9740      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9741 TORCH_INTERNAL_ASSERT(false,
9742 "mutating a non-functional tensor with a functional tensor is not allowed.",
9743 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
9744 } else {
9745 // case 2: arguments are not functional tensors, so we no-op and redispatch.
9746 at::AutoDispatchSkipFunctionalize guard;
9747 at::Tensor tmp_output = at::_ops::_sample_dirichlet_out::call(self_, generator, out_);
9748        return out;
9749 }
9750 } else {
9751 at::Tensor tmp_output;
9752 {
9753 at::AutoDispatchSkipFunctionalize guard;
9754 tmp_output = at::_ops::_sample_dirichlet::call(self_, generator);
9755 }
9756 at::functionalization::impl::replace_(out, tmp_output);
9757 at::functionalization::impl::commit_update(out);
9758 at::functionalization::impl::sync(out);
9759 return out;
9760 }
9761 }
9762
9763 at::Tensor & binomial_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & count, const at::Tensor & prob, c10::optional<at::Generator> generator, at::Tensor & out) {
9764 if (false) {
9765 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
9766 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
9767    // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
9768 auto count_meta = to_meta(count);
9769 auto prob_meta = to_meta(prob);
9770 auto out_meta = to_meta(out);
9771 at::AutoDispatchSkipFunctionalize func_guard;
9772 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
9773 at::_ops::binomial_out::call(count_meta, prob_meta, generator, out_meta);
9774 }
9775
9776 at::Tensor count_;
9777 if (at::functionalization::impl::isFunctionalTensor(count)) {
9778 at::functionalization::impl::sync(count);
9779 count_ = at::functionalization::impl::from_functional_tensor(count);
9780 } else {
9781 count_ = count;
9782 }
9783
9784 at::Tensor prob_;
9785 if (at::functionalization::impl::isFunctionalTensor(prob)) {
9786 at::functionalization::impl::sync(prob);
9787 prob_ = at::functionalization::impl::from_functional_tensor(prob);
9788 } else {
9789 prob_ = prob;
9790 }
9791
9792 at::Tensor out_;
9793 if (at::functionalization::impl::isFunctionalTensor(out)) {
9794 at::functionalization::impl::sync(out);
9795 out_ = at::functionalization::impl::from_functional_tensor(out);
9796 } else {
9797 out_ = out;
9798 }
9799 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
9800 if ((false || at::functionalization::impl::isFunctionalTensor(count) || at::functionalization::impl::isFunctionalTensor(prob))) {
9801 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
9802 TORCH_INTERNAL_ASSERT(false,
9803 "mutating a non-functional tensor with a functional tensor is not allowed.",
9804 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
9805 } else {
9806 // case 2: arguments are not functional tensors, so we no-op and redispatch.
9807 at::AutoDispatchSkipFunctionalize guard;
9808 at::Tensor tmp_output = at::_ops::binomial_out::call(count_, prob_, generator, out_);
9809       return out;
9810 }
9811 } else {
9812 at::Tensor tmp_output;
9813 {
9814 at::AutoDispatchSkipFunctionalize guard;
9815 tmp_output = at::_ops::binomial::call(count_, prob_, generator);
9816 }
9817 at::functionalization::impl::replace_(out, tmp_output);
9818 at::functionalization::impl::commit_update(out);
9819 at::functionalization::impl::sync(out);
9820 return out;
9821 }
9822 }
9823
9824 at::Tensor & native_norm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p, at::Tensor & out) {
9825 if (false) {
9826 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
9827 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
9828     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
9829 auto self_meta = to_meta(self);
9830 auto out_meta = to_meta(out);
9831 at::AutoDispatchSkipFunctionalize func_guard;
9832 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
9833 at::_ops::native_norm_out::call(self_meta, p, out_meta);
9834 }
9835
9836 at::Tensor self_;
9837 if (at::functionalization::impl::isFunctionalTensor(self)) {
9838 at::functionalization::impl::sync(self);
9839 self_ = at::functionalization::impl::from_functional_tensor(self);
9840 } else {
9841 self_ = self;
9842 }
9843
9844 at::Tensor out_;
9845 if (at::functionalization::impl::isFunctionalTensor(out)) {
9846 at::functionalization::impl::sync(out);
9847 out_ = at::functionalization::impl::from_functional_tensor(out);
9848 } else {
9849 out_ = out;
9850 }
9851 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
9852 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
9853 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
9854 TORCH_INTERNAL_ASSERT(false,
9855 "mutating a non-functional tensor with a functional tensor is not allowed.",
9856 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
9857 } else {
9858 // case 2: arguments are not functional tensors, so we no-op and redispatch.
9859 at::AutoDispatchSkipFunctionalize guard;
9860 at::Tensor tmp_output = at::_ops::native_norm_out::call(self_, p, out_);
9861       return out;
9862 }
9863 } else {
9864 at::Tensor tmp_output;
9865 {
9866 at::AutoDispatchSkipFunctionalize guard;
9867 tmp_output = at::_ops::native_norm::call(self_, p);
9868 }
9869 at::functionalization::impl::replace_(out, tmp_output);
9870 at::functionalization::impl::commit_update(out);
9871 at::functionalization::impl::sync(out);
9872 return out;
9873 }
9874 }
9875
9876 at::Tensor & native_norm_out_ScalarOpt_dim_dtype_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
9877 if (false) {
9878 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
9879 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
9880     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
9881 auto self_meta = to_meta(self);
9882 auto out_meta = to_meta(out);
9883 at::AutoDispatchSkipFunctionalize func_guard;
9884 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
9885 at::_ops::native_norm_ScalarOpt_dim_dtype_out::call(self_meta, p, dim, keepdim, dtype, out_meta);
9886 }
9887
9888 at::Tensor self_;
9889 if (at::functionalization::impl::isFunctionalTensor(self)) {
9890 at::functionalization::impl::sync(self);
9891 self_ = at::functionalization::impl::from_functional_tensor(self);
9892 } else {
9893 self_ = self;
9894 }
9895
9896 at::Tensor out_;
9897 if (at::functionalization::impl::isFunctionalTensor(out)) {
9898 at::functionalization::impl::sync(out);
9899 out_ = at::functionalization::impl::from_functional_tensor(out);
9900 } else {
9901 out_ = out;
9902 }
9903 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
9904 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
9905 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
9906 TORCH_INTERNAL_ASSERT(false,
9907 "mutating a non-functional tensor with a functional tensor is not allowed.",
9908 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
9909 } else {
9910 // case 2: arguments are not functional tensors, so we no-op and redispatch.
9911 at::AutoDispatchSkipFunctionalize guard;
9912 at::Tensor tmp_output = at::_ops::native_norm_ScalarOpt_dim_dtype_out::call(self_, p, dim, keepdim, dtype, out_);
9913       return out;
9914 }
9915 } else {
9916 at::Tensor tmp_output;
9917 {
9918 at::AutoDispatchSkipFunctionalize guard;
9919 tmp_output = at::_ops::native_norm_ScalarOpt_dim_dtype::call(self_, p, dim, keepdim, dtype);
9920 }
9921 at::functionalization::impl::replace_(out, tmp_output);
9922 at::functionalization::impl::commit_update(out);
9923 at::functionalization::impl::sync(out);
9924 return out;
9925 }
9926 }
9927
9928 at::Tensor & _sparse_sum_out_dim_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
9929 if (false) {
9930 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
9931 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
9932     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
9933 auto self_meta = to_meta(self);
9934 auto out_meta = to_meta(out);
9935 at::AutoDispatchSkipFunctionalize func_guard;
9936 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
9937 at::_ops::_sparse_sum_dim_out::call(self_meta, dim, out_meta);
9938 }
9939
9940 at::Tensor self_;
9941 if (at::functionalization::impl::isFunctionalTensor(self)) {
9942 at::functionalization::impl::sync(self);
9943 self_ = at::functionalization::impl::from_functional_tensor(self);
9944 } else {
9945 self_ = self;
9946 }
9947
9948 at::Tensor out_;
9949 if (at::functionalization::impl::isFunctionalTensor(out)) {
9950 at::functionalization::impl::sync(out);
9951 out_ = at::functionalization::impl::from_functional_tensor(out);
9952 } else {
9953 out_ = out;
9954 }
9955 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
9956 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
9957 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
9958 TORCH_INTERNAL_ASSERT(false,
9959 "mutating a non-functional tensor with a functional tensor is not allowed.",
9960 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
9961 } else {
9962 // case 2: arguments are not functional tensors, so we no-op and redispatch.
9963 at::AutoDispatchSkipFunctionalize guard;
9964 at::Tensor tmp_output = at::_ops::_sparse_sum_dim_out::call(self_, dim, out_);
9965       return out;
9966 }
9967 } else {
9968 at::Tensor tmp_output;
9969 {
9970 at::AutoDispatchSkipFunctionalize guard;
9971 tmp_output = at::_ops::_sparse_sum_dim::call(self_, dim);
9972 }
9973 at::functionalization::impl::replace_(out, tmp_output);
9974 at::functionalization::impl::commit_update(out);
9975 at::functionalization::impl::sync(out);
9976 return out;
9977 }
9978 }
9979
9980 at::Tensor & _sparse_sum_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
9981 if (false) {
9982 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
9983 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
9984     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
9985 auto grad_meta = to_meta(grad);
9986 auto self_meta = to_meta(self);
9987 auto out_meta = to_meta(out);
9988 at::AutoDispatchSkipFunctionalize func_guard;
9989 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
9990 at::_ops::_sparse_sum_backward_out::call(grad_meta, self_meta, dim, out_meta);
9991 }
9992
9993 at::Tensor grad_;
9994 if (at::functionalization::impl::isFunctionalTensor(grad)) {
9995 at::functionalization::impl::sync(grad);
9996 grad_ = at::functionalization::impl::from_functional_tensor(grad);
9997 } else {
9998 grad_ = grad;
9999 }
10000
10001 at::Tensor self_;
10002 if (at::functionalization::impl::isFunctionalTensor(self)) {
10003 at::functionalization::impl::sync(self);
10004 self_ = at::functionalization::impl::from_functional_tensor(self);
10005 } else {
10006 self_ = self;
10007 }
10008
10009 at::Tensor out_;
10010 if (at::functionalization::impl::isFunctionalTensor(out)) {
10011 at::functionalization::impl::sync(out);
10012 out_ = at::functionalization::impl::from_functional_tensor(out);
10013 } else {
10014 out_ = out;
10015 }
10016 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
10017 if ((false || at::functionalization::impl::isFunctionalTensor(grad) || at::functionalization::impl::isFunctionalTensor(self))) {
10018 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
10019 TORCH_INTERNAL_ASSERT(false,
10020 "mutating a non-functional tensor with a functional tensor is not allowed.",
10021 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
10022 } else {
10023 // case 2: arguments are not functional tensors, so we no-op and redispatch.
10024 at::AutoDispatchSkipFunctionalize guard;
10025 at::Tensor tmp_output = at::_ops::_sparse_sum_backward_out::call(grad_, self_, dim, out_);
10026       return out;
10027 }
10028 } else {
10029 at::Tensor tmp_output;
10030 {
10031 at::AutoDispatchSkipFunctionalize guard;
10032 tmp_output = at::_ops::_sparse_sum_backward::call(grad_, self_, dim);
10033 }
10034 at::functionalization::impl::replace_(out, tmp_output);
10035 at::functionalization::impl::commit_update(out);
10036 at::functionalization::impl::sync(out);
10037 return out;
10038 }
10039 }
10040
10041 at::Tensor & _sparse_softmax_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
10042 if (false) {
10043 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
10044 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
10045     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10046 auto self_meta = to_meta(self);
10047 auto out_meta = to_meta(out);
10048 at::AutoDispatchSkipFunctionalize func_guard;
10049 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
10050 at::_ops::_sparse_softmax_out::call(self_meta, dim, half_to_float, out_meta);
10051 }
10052
10053 at::Tensor self_;
10054 if (at::functionalization::impl::isFunctionalTensor(self)) {
10055 at::functionalization::impl::sync(self);
10056 self_ = at::functionalization::impl::from_functional_tensor(self);
10057 } else {
10058 self_ = self;
10059 }
10060
10061 at::Tensor out_;
10062 if (at::functionalization::impl::isFunctionalTensor(out)) {
10063 at::functionalization::impl::sync(out);
10064 out_ = at::functionalization::impl::from_functional_tensor(out);
10065 } else {
10066 out_ = out;
10067 }
10068 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
10069 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
10070 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
10071 TORCH_INTERNAL_ASSERT(false,
10072 "mutating a non-functional tensor with a functional tensor is not allowed.",
10073 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
10074 } else {
10075 // case 2: arguments are not functional tensors, so we no-op and redispatch.
10076 at::AutoDispatchSkipFunctionalize guard;
10077 at::Tensor tmp_output = at::_ops::_sparse_softmax_out::call(self_, dim, half_to_float, out_);
10078       return out;
10079 }
10080 } else {
10081 at::Tensor tmp_output;
10082 {
10083 at::AutoDispatchSkipFunctionalize guard;
10084 tmp_output = at::_ops::_sparse_softmax::call(self_, dim, half_to_float);
10085 }
10086 at::functionalization::impl::replace_(out, tmp_output);
10087 at::functionalization::impl::commit_update(out);
10088 at::functionalization::impl::sync(out);
10089 return out;
10090 }
10091 }
10092
10093 at::Tensor & clone_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
10094 if (false) {
10095 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
10096 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
10097     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10098 auto self_meta = to_meta(self);
10099 auto out_meta = to_meta(out);
10100 at::AutoDispatchSkipFunctionalize func_guard;
10101 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
10102 at::_ops::clone_out::call(self_meta, memory_format, out_meta);
10103 }
10104
10105 at::Tensor self_;
10106 if (at::functionalization::impl::isFunctionalTensor(self)) {
10107 at::functionalization::impl::sync(self);
10108 self_ = at::functionalization::impl::from_functional_tensor(self);
10109 } else {
10110 self_ = self;
10111 }
10112
10113 at::Tensor out_;
10114 if (at::functionalization::impl::isFunctionalTensor(out)) {
10115 at::functionalization::impl::sync(out);
10116 out_ = at::functionalization::impl::from_functional_tensor(out);
10117 } else {
10118 out_ = out;
10119 }
10120 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
10121 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
10122 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
10123 TORCH_INTERNAL_ASSERT(false,
10124 "mutating a non-functional tensor with a functional tensor is not allowed.",
10125 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
10126 } else {
10127 // case 2: arguments are not functional tensors, so we no-op and redispatch.
10128 at::AutoDispatchSkipFunctionalize guard;
10129 at::Tensor tmp_output = at::_ops::clone_out::call(self_, memory_format, out_);
10130       return out;
10131 }
10132 } else {
10133 at::Tensor tmp_output;
10134 {
10135 at::AutoDispatchSkipFunctionalize guard;
10136 tmp_output = at::_ops::clone::call(self_, memory_format);
10137 }
10138 at::functionalization::impl::replace_(out, tmp_output);
10139 at::functionalization::impl::commit_update(out);
10140 at::functionalization::impl::sync(out);
10141 return out;
10142 }
10143 }
10144
10145 const at::Tensor & resize_as_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format, const at::Tensor & out) {
10146 if (false) {
10147 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
10148 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
10149     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10150 auto self_meta = to_meta(self);
10151 auto the_template_meta = to_meta(the_template);
10152 auto out_meta = to_meta(out);
10153 at::AutoDispatchSkipFunctionalize func_guard;
10154 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
10155 at::_ops::resize_as_out::call(self_meta, the_template_meta, memory_format, out_meta);
10156 }
10157
10158 at::Tensor self_;
10159 if (at::functionalization::impl::isFunctionalTensor(self)) {
10160 at::functionalization::impl::sync(self);
10161 self_ = at::functionalization::impl::from_functional_tensor(self);
10162 } else {
10163 self_ = self;
10164 }
10165
10166 at::Tensor the_template_;
10167 if (at::functionalization::impl::isFunctionalTensor(the_template)) {
10168 at::functionalization::impl::sync(the_template);
10169 the_template_ = at::functionalization::impl::from_functional_tensor(the_template);
10170 } else {
10171 the_template_ = the_template;
10172 }
10173
10174 at::Tensor out_;
10175 if (at::functionalization::impl::isFunctionalTensor(out)) {
10176 at::functionalization::impl::sync(out);
10177 out_ = at::functionalization::impl::from_functional_tensor(out);
10178 } else {
10179 out_ = out;
10180 }
10181 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
10182 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(the_template))) {
10183 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
10184 TORCH_INTERNAL_ASSERT(false,
10185 "mutating a non-functional tensor with a functional tensor is not allowed.",
10186 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
10187 } else {
10188 // case 2: arguments are not functional tensors, so we no-op and redispatch.
10189 at::AutoDispatchSkipFunctionalize guard;
10190 at::Tensor tmp_output = at::_ops::resize_as_out::call(self_, the_template_, memory_format, out_);
10191       return out;
10192 }
10193 } else {
10194 at::Tensor tmp_output;
10195 {
10196 at::AutoDispatchSkipFunctionalize guard;
10197 tmp_output = at::_ops::resize_as::call(self_, the_template_, memory_format);
10198 }
10199 at::functionalization::impl::replace_(out, tmp_output);
10200 at::functionalization::impl::commit_update(out);
10201 at::functionalization::impl::sync(out);
10202 return out;
10203 }
10204 }
10205
10206 const at::Tensor & resize_as_(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format) {
10207 if (true) {
10208 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
10209 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
10210     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10211 auto self_meta = to_meta(self);
10212 auto the_template_meta = to_meta(the_template);
10213 at::AutoDispatchSkipFunctionalize func_guard;
10214 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
10215 at::_ops::resize_as_::call(self_meta, the_template_meta, memory_format);
10216 }
10217
10218 at::Tensor self_;
10219 if (at::functionalization::impl::isFunctionalTensor(self)) {
10220 at::functionalization::impl::sync(self);
10221 self_ = at::functionalization::impl::from_functional_tensor(self);
10222 } else {
10223 self_ = self;
10224 }
10225
10226 at::Tensor the_template_;
10227 if (at::functionalization::impl::isFunctionalTensor(the_template)) {
10228 at::functionalization::impl::sync(the_template);
10229 the_template_ = at::functionalization::impl::from_functional_tensor(the_template);
10230 } else {
10231 the_template_ = the_template;
10232 }
10233 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
10234 if ((false || at::functionalization::impl::isFunctionalTensor(the_template))) {
10235 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
10236 TORCH_INTERNAL_ASSERT(false,
10237 "mutating a non-functional tensor with a functional tensor is not allowed.",
10238 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
10239 } else {
10240 // case 2: arguments are not functional tensors, so we no-op and redispatch.
10241 at::AutoDispatchSkipFunctionalize guard;
10242 at::Tensor tmp_output = at::_ops::resize_as_::call(self_, the_template_, memory_format);
10243       return self;
10244 }
10245 } else {
10246 at::Tensor tmp_output;
10247 {
10248 at::AutoDispatchSkipFunctionalize guard;
10249 tmp_output = at::_ops::resize_as::call(self_, the_template_, memory_format);
10250 }
10251 at::functionalization::impl::replace_(self, tmp_output);
10252 at::functionalization::impl::commit_update(self);
10253 at::functionalization::impl::sync(self);
10254 return self;
10255 }
10256 }
10257
10258 at::Tensor & zero_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
10259 if (false) {
10260 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
10261 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
10262     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10263 auto self_meta = to_meta(self);
10264 auto out_meta = to_meta(out);
10265 at::AutoDispatchSkipFunctionalize func_guard;
10266 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
10267 at::_ops::zero_out::call(self_meta, out_meta);
10268 }
10269
10270 at::Tensor self_;
10271 if (at::functionalization::impl::isFunctionalTensor(self)) {
10272 at::functionalization::impl::sync(self);
10273 self_ = at::functionalization::impl::from_functional_tensor(self);
10274 } else {
10275 self_ = self;
10276 }
10277
10278 at::Tensor out_;
10279 if (at::functionalization::impl::isFunctionalTensor(out)) {
10280 at::functionalization::impl::sync(out);
10281 out_ = at::functionalization::impl::from_functional_tensor(out);
10282 } else {
10283 out_ = out;
10284 }
10285 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
10286 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
10287 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
10288 TORCH_INTERNAL_ASSERT(false,
10289 "mutating a non-functional tensor with a functional tensor is not allowed.",
10290 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
10291 } else {
10292 // case 2: arguments are not functional tensors, so we no-op and redispatch.
10293 at::AutoDispatchSkipFunctionalize guard;
10294 at::Tensor tmp_output = at::_ops::zero_out::call(self_, out_);
10295       return out;
10296 }
10297 } else {
10298 at::Tensor tmp_output;
10299 {
10300 at::AutoDispatchSkipFunctionalize guard;
10301 tmp_output = at::_ops::zero::call(self_);
10302 }
10303 at::functionalization::impl::replace_(out, tmp_output);
10304 at::functionalization::impl::commit_update(out);
10305 at::functionalization::impl::sync(out);
10306 return out;
10307 }
10308 }
10309
10310 at::Tensor & zero_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
10311 if (true) {
10312 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
10313 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
10314     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10315 auto self_meta = to_meta(self);
10316 at::AutoDispatchSkipFunctionalize func_guard;
10317 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
10318 at::_ops::zero_::call(self_meta);
10319 }
10320
10321 at::Tensor self_;
10322 if (at::functionalization::impl::isFunctionalTensor(self)) {
10323 at::functionalization::impl::sync(self);
10324 self_ = at::functionalization::impl::from_functional_tensor(self);
10325 } else {
10326 self_ = self;
10327 }
10328 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
10329 if ((false)) {
10330 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
10331 TORCH_INTERNAL_ASSERT(false,
10332 "mutating a non-functional tensor with a functional tensor is not allowed.",
10333 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
10334 } else {
10335 // case 2: arguments are not functional tensors, so we no-op and redispatch.
10336 at::AutoDispatchSkipFunctionalize guard;
10337 at::Tensor tmp_output = at::_ops::zero_::call(self_);
10338       return self;
10339 }
10340 } else {
10341 at::Tensor tmp_output;
10342 {
10343 at::AutoDispatchSkipFunctionalize guard;
10344 tmp_output = at::_ops::zero::call(self_);
10345 }
10346 at::functionalization::impl::replace_(self, tmp_output);
10347 at::functionalization::impl::commit_update(self);
10348 at::functionalization::impl::sync(self);
10349 return self;
10350 }
10351 }
10352
10353 at::Tensor & heaviside_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & values, at::Tensor & out) {
10354 if (false) {
10355 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
10356 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
10357     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10358 auto self_meta = to_meta(self);
10359 auto values_meta = to_meta(values);
10360 auto out_meta = to_meta(out);
10361 at::AutoDispatchSkipFunctionalize func_guard;
10362 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
10363 at::_ops::heaviside_out::call(self_meta, values_meta, out_meta);
10364 }
10365
10366 at::Tensor self_;
10367 if (at::functionalization::impl::isFunctionalTensor(self)) {
10368 at::functionalization::impl::sync(self);
10369 self_ = at::functionalization::impl::from_functional_tensor(self);
10370 } else {
10371 self_ = self;
10372 }
10373
10374 at::Tensor values_;
10375 if (at::functionalization::impl::isFunctionalTensor(values)) {
10376 at::functionalization::impl::sync(values);
10377 values_ = at::functionalization::impl::from_functional_tensor(values);
10378 } else {
10379 values_ = values;
10380 }
10381
10382 at::Tensor out_;
10383 if (at::functionalization::impl::isFunctionalTensor(out)) {
10384 at::functionalization::impl::sync(out);
10385 out_ = at::functionalization::impl::from_functional_tensor(out);
10386 } else {
10387 out_ = out;
10388 }
10389 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
10390 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(values))) {
10391 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
10392 TORCH_INTERNAL_ASSERT(false,
10393 "mutating a non-functional tensor with a functional tensor is not allowed.",
10394 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
10395 } else {
10396 // case 2: arguments are not functional tensors, so we no-op and redispatch.
10397 at::AutoDispatchSkipFunctionalize guard;
10398 at::Tensor tmp_output = at::_ops::heaviside_out::call(self_, values_, out_);
10399       return out;
10400 }
10401 } else {
10402 at::Tensor tmp_output;
10403 {
10404 at::AutoDispatchSkipFunctionalize guard;
10405 tmp_output = at::_ops::heaviside::call(self_, values_);
10406 }
10407 at::functionalization::impl::replace_(out, tmp_output);
10408 at::functionalization::impl::commit_update(out);
10409 at::functionalization::impl::sync(out);
10410 return out;
10411 }
10412 }
10413
10414 at::Tensor & heaviside_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & values) {
10415 if (true) {
10416 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
10417 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
10418     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10419 auto self_meta = to_meta(self);
10420 auto values_meta = to_meta(values);
10421 at::AutoDispatchSkipFunctionalize func_guard;
10422 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
10423 at::_ops::heaviside_::call(self_meta, values_meta);
10424 }
10425
10426 at::Tensor self_;
10427 if (at::functionalization::impl::isFunctionalTensor(self)) {
10428 at::functionalization::impl::sync(self);
10429 self_ = at::functionalization::impl::from_functional_tensor(self);
10430 } else {
10431 self_ = self;
10432 }
10433
10434 at::Tensor values_;
10435 if (at::functionalization::impl::isFunctionalTensor(values)) {
10436 at::functionalization::impl::sync(values);
10437 values_ = at::functionalization::impl::from_functional_tensor(values);
10438 } else {
10439 values_ = values;
10440 }
10441 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
10442 if ((false || at::functionalization::impl::isFunctionalTensor(values))) {
10443 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
10444 TORCH_INTERNAL_ASSERT(false,
10445 "mutating a non-functional tensor with a functional tensor is not allowed.",
10446 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
10447 } else {
10448 // case 2: arguments are not functional tensors, so we no-op and redispatch.
10449 at::AutoDispatchSkipFunctionalize guard;
10450 at::Tensor tmp_output = at::_ops::heaviside_::call(self_, values_);
10451       return self;
10452 }
10453 } else {
10454 at::Tensor tmp_output;
10455 {
10456 at::AutoDispatchSkipFunctionalize guard;
10457 tmp_output = at::_ops::heaviside::call(self_, values_);
10458 }
10459 at::functionalization::impl::replace_(self, tmp_output);
10460 at::functionalization::impl::commit_update(self);
10461 at::functionalization::impl::sync(self);
10462 return self;
10463 }
10464 }
10465
10466 at::Tensor & addmm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
10467 if (false) {
10468 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
10469 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
10470     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10471 auto self_meta = to_meta(self);
10472 auto mat1_meta = to_meta(mat1);
10473 auto mat2_meta = to_meta(mat2);
10474 auto out_meta = to_meta(out);
10475 at::AutoDispatchSkipFunctionalize func_guard;
10476 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
10477 at::_ops::addmm_out::call(self_meta, mat1_meta, mat2_meta, beta, alpha, out_meta);
10478 }
10479
10480 at::Tensor self_;
10481 if (at::functionalization::impl::isFunctionalTensor(self)) {
10482 at::functionalization::impl::sync(self);
10483 self_ = at::functionalization::impl::from_functional_tensor(self);
10484 } else {
10485 self_ = self;
10486 }
10487
10488 at::Tensor mat1_;
10489 if (at::functionalization::impl::isFunctionalTensor(mat1)) {
10490 at::functionalization::impl::sync(mat1);
10491 mat1_ = at::functionalization::impl::from_functional_tensor(mat1);
10492 } else {
10493 mat1_ = mat1;
10494 }
10495
10496 at::Tensor mat2_;
10497 if (at::functionalization::impl::isFunctionalTensor(mat2)) {
10498 at::functionalization::impl::sync(mat2);
10499 mat2_ = at::functionalization::impl::from_functional_tensor(mat2);
10500 } else {
10501 mat2_ = mat2;
10502 }
10503
10504 at::Tensor out_;
10505 if (at::functionalization::impl::isFunctionalTensor(out)) {
10506 at::functionalization::impl::sync(out);
10507 out_ = at::functionalization::impl::from_functional_tensor(out);
10508 } else {
10509 out_ = out;
10510 }
10511 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
10512 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mat1) || at::functionalization::impl::isFunctionalTensor(mat2))) {
10513 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
10514 TORCH_INTERNAL_ASSERT(false,
10515 "mutating a non-functional tensor with a functional tensor is not allowed.",
10516 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
10517 } else {
10518 // case 2: arguments are not functional tensors, so we no-op and redispatch.
10519 at::AutoDispatchSkipFunctionalize guard;
10520 at::Tensor tmp_output = at::_ops::addmm_out::call(self_, mat1_, mat2_, beta, alpha, out_);
10521       return out;
10522 }
10523 } else {
10524 at::Tensor tmp_output;
10525 {
10526 at::AutoDispatchSkipFunctionalize guard;
10527 tmp_output = at::_ops::addmm::call(self_, mat1_, mat2_, beta, alpha);
10528 }
10529 at::functionalization::impl::replace_(out, tmp_output);
10530 at::functionalization::impl::commit_update(out);
10531 at::functionalization::impl::sync(out);
10532 return out;
10533 }
10534 }
10535
10536 at::Tensor & addmm_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
10537 if (true) {
10538 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
10539 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
10540     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10541 auto self_meta = to_meta(self);
10542 auto mat1_meta = to_meta(mat1);
10543 auto mat2_meta = to_meta(mat2);
10544 at::AutoDispatchSkipFunctionalize func_guard;
10545 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
10546 at::_ops::addmm_::call(self_meta, mat1_meta, mat2_meta, beta, alpha);
10547 }
10548
10549 at::Tensor self_;
10550 if (at::functionalization::impl::isFunctionalTensor(self)) {
10551 at::functionalization::impl::sync(self);
10552 self_ = at::functionalization::impl::from_functional_tensor(self);
10553 } else {
10554 self_ = self;
10555 }
10556
10557 at::Tensor mat1_;
10558 if (at::functionalization::impl::isFunctionalTensor(mat1)) {
10559 at::functionalization::impl::sync(mat1);
10560 mat1_ = at::functionalization::impl::from_functional_tensor(mat1);
10561 } else {
10562 mat1_ = mat1;
10563 }
10564
10565 at::Tensor mat2_;
10566 if (at::functionalization::impl::isFunctionalTensor(mat2)) {
10567 at::functionalization::impl::sync(mat2);
10568 mat2_ = at::functionalization::impl::from_functional_tensor(mat2);
10569 } else {
10570 mat2_ = mat2;
10571 }
10572 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
10573 if ((false || at::functionalization::impl::isFunctionalTensor(mat1) || at::functionalization::impl::isFunctionalTensor(mat2))) {
10574 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
10575 TORCH_INTERNAL_ASSERT(false,
10576 "mutating a non-functional tensor with a functional tensor is not allowed.",
10577 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
10578 } else {
10579 // case 2: arguments are not functional tensors, so we no-op and redispatch.
10580 at::AutoDispatchSkipFunctionalize guard;
10581 at::Tensor tmp_output = at::_ops::addmm_::call(self_, mat1_, mat2_, beta, alpha);
10582       return self;
10583 }
10584 } else {
10585 at::Tensor tmp_output;
10586 {
10587 at::AutoDispatchSkipFunctionalize guard;
10588 tmp_output = at::_ops::addmm::call(self_, mat1_, mat2_, beta, alpha);
10589 }
10590 at::functionalization::impl::replace_(self, tmp_output);
10591 at::functionalization::impl::commit_update(self);
10592 at::functionalization::impl::sync(self);
10593 return self;
10594 }
10595 }
10596
10597 at::Tensor & _sparse_coo_tensor_with_dims_out_out(c10::DispatchKeySet dispatchKeySet, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::Tensor & out) {
10598 if (false) {
10599 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
10600 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
10601     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10602 auto out_meta = to_meta(out);
10603 at::AutoDispatchSkipFunctionalize func_guard;
10604 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
10605 at::_ops::_sparse_coo_tensor_with_dims_out::call(sparse_dim, dense_dim, size, out_meta);
10606 }
10607
10608 at::Tensor out_;
10609 if (at::functionalization::impl::isFunctionalTensor(out)) {
10610 at::functionalization::impl::sync(out);
10611 out_ = at::functionalization::impl::from_functional_tensor(out);
10612 } else {
10613 out_ = out;
10614 }
10615 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
10616 if ((false)) {
10617 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
10618 TORCH_INTERNAL_ASSERT(false,
10619 "mutating a non-functional tensor with a functional tensor is not allowed.",
10620 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
10621 } else {
10622 // case 2: arguments are not functional tensors, so we no-op and redispatch.
10623 at::AutoDispatchSkipFunctionalize guard;
10624 at::Tensor tmp_output = at::_ops::_sparse_coo_tensor_with_dims_out::call(sparse_dim, dense_dim, size, out_);
10625       return out;
10626 }
10627 } else {
10628 at::Tensor tmp_output;
10629 {
10630 at::AutoDispatchSkipFunctionalize guard;
10631 tmp_output = at::_ops::_sparse_coo_tensor_with_dims::call(sparse_dim, dense_dim, size, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt);
10632 }
10633 at::functionalization::impl::replace_(out, tmp_output);
10634 at::functionalization::impl::commit_update(out);
10635 at::functionalization::impl::sync(out);
10636 return out;
10637 }
10638 }
10639
10640 at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_out_out(c10::DispatchKeySet dispatchKeySet, int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::Tensor & out) {
10641 if (false) {
10642 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
10643 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
10644     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10645 auto indices_meta = to_meta(indices);
10646 auto values_meta = to_meta(values);
10647 auto out_meta = to_meta(out);
10648 at::AutoDispatchSkipFunctionalize func_guard;
10649 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
10650 at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::call(sparse_dim, dense_dim, size, indices_meta, values_meta, out_meta);
10651 }
10652
10653 at::Tensor indices_;
10654 if (at::functionalization::impl::isFunctionalTensor(indices)) {
10655 at::functionalization::impl::sync(indices);
10656 indices_ = at::functionalization::impl::from_functional_tensor(indices);
10657 } else {
10658 indices_ = indices;
10659 }
10660
10661 at::Tensor values_;
10662 if (at::functionalization::impl::isFunctionalTensor(values)) {
10663 at::functionalization::impl::sync(values);
10664 values_ = at::functionalization::impl::from_functional_tensor(values);
10665 } else {
10666 values_ = values;
10667 }
10668
10669 at::Tensor out_;
10670 if (at::functionalization::impl::isFunctionalTensor(out)) {
10671 at::functionalization::impl::sync(out);
10672 out_ = at::functionalization::impl::from_functional_tensor(out);
10673 } else {
10674 out_ = out;
10675 }
10676 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
10677 if ((false || at::functionalization::impl::isFunctionalTensor(indices) || at::functionalization::impl::isFunctionalTensor(values))) {
10678 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
10679 TORCH_INTERNAL_ASSERT(false,
10680 "mutating a non-functional tensor with a functional tensor is not allowed.",
10681 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
10682 } else {
10683 // case 2: arguments are not functional tensors, so we no-op and redispatch.
10684 at::AutoDispatchSkipFunctionalize guard;
10685 at::Tensor tmp_output = at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::call(sparse_dim, dense_dim, size, indices_, values_, out_);
10686       return out;
10687 }
10688 } else {
10689 at::Tensor tmp_output;
10690 {
10691 at::AutoDispatchSkipFunctionalize guard;
10692 tmp_output = at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, size, indices_, values_, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt);
10693 }
10694 at::functionalization::impl::replace_(out, tmp_output);
10695 at::functionalization::impl::commit_update(out);
10696 at::functionalization::impl::sync(out);
10697 return out;
10698 }
10699 }
10700
10701 const at::Tensor & sparse_resize_and_clear_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out) {
10702 if (false) {
10703 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
10704 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
10705     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10706 auto self_meta = to_meta(self);
10707 auto out_meta = to_meta(out);
10708 at::AutoDispatchSkipFunctionalize func_guard;
10709 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
10710 at::_ops::sparse_resize_and_clear_out::call(self_meta, size, sparse_dim, dense_dim, out_meta);
10711 }
10712
10713 at::Tensor self_;
10714 if (at::functionalization::impl::isFunctionalTensor(self)) {
10715 at::functionalization::impl::sync(self);
10716 self_ = at::functionalization::impl::from_functional_tensor(self);
10717 } else {
10718 self_ = self;
10719 }
10720
10721 at::Tensor out_;
10722 if (at::functionalization::impl::isFunctionalTensor(out)) {
10723 at::functionalization::impl::sync(out);
10724 out_ = at::functionalization::impl::from_functional_tensor(out);
10725 } else {
10726 out_ = out;
10727 }
10728 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
10729 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
10730 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
10731 TORCH_INTERNAL_ASSERT(false,
10732 "mutating a non-functional tensor with a functional tensor is not allowed.",
10733 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
10734 } else {
10735 // case 2: arguments are not functional tensors, so we no-op and redispatch.
10736 at::AutoDispatchSkipFunctionalize guard;
10737 at::Tensor tmp_output = at::_ops::sparse_resize_and_clear_out::call(self_, size, sparse_dim, dense_dim, out_);
10738       return out;
10739 }
10740 } else {
10741 at::Tensor tmp_output;
10742 {
10743 at::AutoDispatchSkipFunctionalize guard;
10744 tmp_output = at::_ops::sparse_resize_and_clear::call(self_, size, sparse_dim, dense_dim);
10745 }
10746 at::functionalization::impl::replace_(out, tmp_output);
10747 at::functionalization::impl::commit_update(out);
10748 at::functionalization::impl::sync(out);
10749 return out;
10750 }
10751 }
10752
10753 const at::Tensor & sparse_resize_and_clear_(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
10754 if (true) {
10755 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
10756 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
10757     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10758 auto self_meta = to_meta(self);
10759 at::AutoDispatchSkipFunctionalize func_guard;
10760 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
10761 at::_ops::sparse_resize_and_clear_::call(self_meta, size, sparse_dim, dense_dim);
10762 }
10763
10764 at::Tensor self_;
10765 if (at::functionalization::impl::isFunctionalTensor(self)) {
10766 at::functionalization::impl::sync(self);
10767 self_ = at::functionalization::impl::from_functional_tensor(self);
10768 } else {
10769 self_ = self;
10770 }
10771 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
10772 if ((false)) {
10773 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
10774 TORCH_INTERNAL_ASSERT(false,
10775 "mutating a non-functional tensor with a functional tensor is not allowed.",
10776 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
10777 } else {
10778 // case 2: arguments are not functional tensors, so we no-op and redispatch.
10779 at::AutoDispatchSkipFunctionalize guard;
10780 at::Tensor tmp_output = at::_ops::sparse_resize_and_clear_::call(self_, size, sparse_dim, dense_dim);
10781       return self;
10782 }
10783 } else {
10784 at::Tensor tmp_output;
10785 {
10786 at::AutoDispatchSkipFunctionalize guard;
10787 tmp_output = at::_ops::sparse_resize_and_clear::call(self_, size, sparse_dim, dense_dim);
10788 }
10789 at::functionalization::impl::replace_(self, tmp_output);
10790 at::functionalization::impl::commit_update(self);
10791 at::functionalization::impl::sync(self);
10792 return self;
10793 }
10794 }
10795
10796 at::Tensor & hspmm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out) {
10797 if (false) {
10798 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
10799 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
10800     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10801 auto mat1_meta = to_meta(mat1);
10802 auto mat2_meta = to_meta(mat2);
10803 auto out_meta = to_meta(out);
10804 at::AutoDispatchSkipFunctionalize func_guard;
10805 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
10806 at::_ops::hspmm_out::call(mat1_meta, mat2_meta, out_meta);
10807 }
10808
10809 at::Tensor mat1_;
10810 if (at::functionalization::impl::isFunctionalTensor(mat1)) {
10811 at::functionalization::impl::sync(mat1);
10812 mat1_ = at::functionalization::impl::from_functional_tensor(mat1);
10813 } else {
10814 mat1_ = mat1;
10815 }
10816
10817 at::Tensor mat2_;
10818 if (at::functionalization::impl::isFunctionalTensor(mat2)) {
10819 at::functionalization::impl::sync(mat2);
10820 mat2_ = at::functionalization::impl::from_functional_tensor(mat2);
10821 } else {
10822 mat2_ = mat2;
10823 }
10824
10825 at::Tensor out_;
10826 if (at::functionalization::impl::isFunctionalTensor(out)) {
10827 at::functionalization::impl::sync(out);
10828 out_ = at::functionalization::impl::from_functional_tensor(out);
10829 } else {
10830 out_ = out;
10831 }
10832 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
10833 if ((false || at::functionalization::impl::isFunctionalTensor(mat1) || at::functionalization::impl::isFunctionalTensor(mat2))) {
10834 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
10835 TORCH_INTERNAL_ASSERT(false,
10836 "mutating a non-functional tensor with a functional tensor is not allowed.",
10837 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
10838 } else {
10839 // case 2: arguments are not functional tensors, so we no-op and redispatch.
10840 at::AutoDispatchSkipFunctionalize guard;
10841 at::Tensor tmp_output = at::_ops::hspmm_out::call(mat1_, mat2_, out_);
10842       return out;
10843 }
10844 } else {
10845 at::Tensor tmp_output;
10846 {
10847 at::AutoDispatchSkipFunctionalize guard;
10848 tmp_output = at::_ops::hspmm::call(mat1_, mat2_);
10849 }
10850 at::functionalization::impl::replace_(out, tmp_output);
10851 at::functionalization::impl::commit_update(out);
10852 at::functionalization::impl::sync(out);
10853 return out;
10854 }
10855 }
10856
10857 at::Tensor & to_sparse_out_sparse_dim_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t sparse_dim, at::Tensor & out) {
10858 if (false) {
10859 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
10860 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
10861     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10862 auto self_meta = to_meta(self);
10863 auto out_meta = to_meta(out);
10864 at::AutoDispatchSkipFunctionalize func_guard;
10865 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
10866 at::_ops::to_sparse_sparse_dim_out::call(self_meta, sparse_dim, out_meta);
10867 }
10868
10869 at::Tensor self_;
10870 if (at::functionalization::impl::isFunctionalTensor(self)) {
10871 at::functionalization::impl::sync(self);
10872 self_ = at::functionalization::impl::from_functional_tensor(self);
10873 } else {
10874 self_ = self;
10875 }
10876
10877 at::Tensor out_;
10878 if (at::functionalization::impl::isFunctionalTensor(out)) {
10879 at::functionalization::impl::sync(out);
10880 out_ = at::functionalization::impl::from_functional_tensor(out);
10881 } else {
10882 out_ = out;
10883 }
10884 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
10885 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
10886 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
10887 TORCH_INTERNAL_ASSERT(false,
10888 "mutating a non-functional tensor with a functional tensor is not allowed.",
10889 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
10890 } else {
10891 // case 2: arguments are not functional tensors, so we no-op and redispatch.
10892 at::AutoDispatchSkipFunctionalize guard;
10893 at::Tensor tmp_output = at::_ops::to_sparse_sparse_dim_out::call(self_, sparse_dim, out_);
10894       return out;
10895 }
10896 } else {
10897 at::Tensor tmp_output;
10898 {
10899 at::AutoDispatchSkipFunctionalize guard;
10900 tmp_output = at::_ops::to_sparse_sparse_dim::call(self_, sparse_dim);
10901 }
10902 at::functionalization::impl::replace_(out, tmp_output);
10903 at::functionalization::impl::commit_update(out);
10904 at::functionalization::impl::sync(out);
10905 return out;
10906 }
10907 }
10908
10909 at::Tensor & to_sparse_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim, at::Tensor & out) {
10910 if (false) {
10911 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
10912 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
10913     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10914 auto self_meta = to_meta(self);
10915 auto out_meta = to_meta(out);
10916 at::AutoDispatchSkipFunctionalize func_guard;
10917 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
10918 at::_ops::to_sparse_out::call(self_meta, layout, blocksize, dense_dim, out_meta);
10919 }
10920
10921 at::Tensor self_;
10922 if (at::functionalization::impl::isFunctionalTensor(self)) {
10923 at::functionalization::impl::sync(self);
10924 self_ = at::functionalization::impl::from_functional_tensor(self);
10925 } else {
10926 self_ = self;
10927 }
10928
10929 at::Tensor out_;
10930 if (at::functionalization::impl::isFunctionalTensor(out)) {
10931 at::functionalization::impl::sync(out);
10932 out_ = at::functionalization::impl::from_functional_tensor(out);
10933 } else {
10934 out_ = out;
10935 }
10936 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
10937 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
10938 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
10939 TORCH_INTERNAL_ASSERT(false,
10940 "mutating a non-functional tensor with a functional tensor is not allowed.",
10941 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
10942 } else {
10943 // case 2: arguments are not functional tensors, so we no-op and redispatch.
10944 at::AutoDispatchSkipFunctionalize guard;
10945 at::Tensor tmp_output = at::_ops::to_sparse_out::call(self_, layout, blocksize, dense_dim, out_);
10946       return out;
10947 }
10948 } else {
10949 at::Tensor tmp_output;
10950 {
10951 at::AutoDispatchSkipFunctionalize guard;
10952 tmp_output = at::_ops::to_sparse::call(self_, layout, blocksize, dense_dim);
10953 }
10954 at::functionalization::impl::replace_(out, tmp_output);
10955 at::functionalization::impl::commit_update(out);
10956 at::functionalization::impl::sync(out);
10957 return out;
10958 }
10959 }
10960
10961 at::Tensor & to_sparse_bsr_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim, at::Tensor & out) {
10962 if (false) {
10963 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
10964 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
10965     // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10966 auto self_meta = to_meta(self);
10967 auto out_meta = to_meta(out);
10968 at::AutoDispatchSkipFunctionalize func_guard;
10969 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
10970 at::_ops::to_sparse_bsr_out::call(self_meta, blocksize, dense_dim, out_meta);
10971 }
10972
10973 at::Tensor self_;
10974 if (at::functionalization::impl::isFunctionalTensor(self)) {
10975 at::functionalization::impl::sync(self);
10976 self_ = at::functionalization::impl::from_functional_tensor(self);
10977 } else {
10978 self_ = self;
10979 }
10980
10981 at::Tensor out_;
10982 if (at::functionalization::impl::isFunctionalTensor(out)) {
10983 at::functionalization::impl::sync(out);
10984 out_ = at::functionalization::impl::from_functional_tensor(out);
10985 } else {
10986 out_ = out;
10987 }
10988 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
10989 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
10990         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
10991 TORCH_INTERNAL_ASSERT(false,
10992 "mutating a non-functional tensor with a functional tensor is not allowed.",
10993 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
10994 } else {
10995 // case 2: arguments are not functional tensors, so we no-op and redispatch.
10996 at::AutoDispatchSkipFunctionalize guard;
10997 at::Tensor tmp_output = at::_ops::to_sparse_bsr_out::call(self_, blocksize, dense_dim, out_);
10998         return out;
10999 }
11000 } else {
11001 at::Tensor tmp_output;
11002 {
11003 at::AutoDispatchSkipFunctionalize guard;
11004 tmp_output = at::_ops::to_sparse_bsr::call(self_, blocksize, dense_dim);
11005 }
11006 at::functionalization::impl::replace_(out, tmp_output);
11007 at::functionalization::impl::commit_update(out);
11008 at::functionalization::impl::sync(out);
11009 return out;
11010 }
11011 }
11012
11013 at::Tensor & to_mkldnn_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
11014 if (false) {
11015 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
11016 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
11017       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11018 auto self_meta = to_meta(self);
11019 auto out_meta = to_meta(out);
11020 at::AutoDispatchSkipFunctionalize func_guard;
11021 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
11022 at::_ops::to_mkldnn_out::call(self_meta, dtype, out_meta);
11023 }
11024
11025 at::Tensor self_;
11026 if (at::functionalization::impl::isFunctionalTensor(self)) {
11027 at::functionalization::impl::sync(self);
11028 self_ = at::functionalization::impl::from_functional_tensor(self);
11029 } else {
11030 self_ = self;
11031 }
11032
11033 at::Tensor out_;
11034 if (at::functionalization::impl::isFunctionalTensor(out)) {
11035 at::functionalization::impl::sync(out);
11036 out_ = at::functionalization::impl::from_functional_tensor(out);
11037 } else {
11038 out_ = out;
11039 }
11040 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
11041 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
11042         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11043 TORCH_INTERNAL_ASSERT(false,
11044 "mutating a non-functional tensor with a functional tensor is not allowed.",
11045 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
11046 } else {
11047 // case 2: arguments are not functional tensors, so we no-op and redispatch.
11048 at::AutoDispatchSkipFunctionalize guard;
11049 at::Tensor tmp_output = at::_ops::to_mkldnn_out::call(self_, dtype, out_);
11050         return out;
11051 }
11052 } else {
11053 at::Tensor tmp_output;
11054 {
11055 at::AutoDispatchSkipFunctionalize guard;
11056 tmp_output = at::_ops::to_mkldnn::call(self_, dtype);
11057 }
11058 at::functionalization::impl::replace_(out, tmp_output);
11059 at::functionalization::impl::commit_update(out);
11060 at::functionalization::impl::sync(out);
11061 return out;
11062 }
11063 }
11064
11065 at::Tensor & mkldnn_reorder_conv3d_weight_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
11066 if (false) {
11067 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
11068 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
11069       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11070 auto self_meta = to_meta(self);
11071 auto out_meta = to_meta(out);
11072 at::AutoDispatchSkipFunctionalize func_guard;
11073 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
11074 at::_ops::mkldnn_reorder_conv3d_weight_out::call(self_meta, padding, stride, dilation, groups, out_meta);
11075 }
11076
11077 at::Tensor self_;
11078 if (at::functionalization::impl::isFunctionalTensor(self)) {
11079 at::functionalization::impl::sync(self);
11080 self_ = at::functionalization::impl::from_functional_tensor(self);
11081 } else {
11082 self_ = self;
11083 }
11084
11085 at::Tensor out_;
11086 if (at::functionalization::impl::isFunctionalTensor(out)) {
11087 at::functionalization::impl::sync(out);
11088 out_ = at::functionalization::impl::from_functional_tensor(out);
11089 } else {
11090 out_ = out;
11091 }
11092 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
11093 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
11094         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11095 TORCH_INTERNAL_ASSERT(false,
11096 "mutating a non-functional tensor with a functional tensor is not allowed.",
11097 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
11098 } else {
11099 // case 2: arguments are not functional tensors, so we no-op and redispatch.
11100 at::AutoDispatchSkipFunctionalize guard;
11101 at::Tensor tmp_output = at::_ops::mkldnn_reorder_conv3d_weight_out::call(self_, padding, stride, dilation, groups, out_);
11102         return out;
11103 }
11104 } else {
11105 at::Tensor tmp_output;
11106 {
11107 at::AutoDispatchSkipFunctionalize guard;
11108 tmp_output = at::_ops::mkldnn_reorder_conv3d_weight::call(self_, padding, stride, dilation, groups);
11109 }
11110 at::functionalization::impl::replace_(out, tmp_output);
11111 at::functionalization::impl::commit_update(out);
11112 at::functionalization::impl::sync(out);
11113 return out;
11114 }
11115 }
11116
11117 at::Tensor & q_per_channel_scales_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
11118 if (false) {
11119 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
11120 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
11121       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11122 auto self_meta = to_meta(self);
11123 auto out_meta = to_meta(out);
11124 at::AutoDispatchSkipFunctionalize func_guard;
11125 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
11126 at::_ops::q_per_channel_scales_out::call(self_meta, out_meta);
11127 }
11128
11129 at::Tensor self_;
11130 if (at::functionalization::impl::isFunctionalTensor(self)) {
11131 at::functionalization::impl::sync(self);
11132 self_ = at::functionalization::impl::from_functional_tensor(self);
11133 } else {
11134 self_ = self;
11135 }
11136
11137 at::Tensor out_;
11138 if (at::functionalization::impl::isFunctionalTensor(out)) {
11139 at::functionalization::impl::sync(out);
11140 out_ = at::functionalization::impl::from_functional_tensor(out);
11141 } else {
11142 out_ = out;
11143 }
11144 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
11145 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
11146         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11147 TORCH_INTERNAL_ASSERT(false,
11148 "mutating a non-functional tensor with a functional tensor is not allowed.",
11149 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
11150 } else {
11151 // case 2: arguments are not functional tensors, so we no-op and redispatch.
11152 at::AutoDispatchSkipFunctionalize guard;
11153 at::Tensor tmp_output = at::_ops::q_per_channel_scales_out::call(self_, out_);
11154         return out;
11155 }
11156 } else {
11157 at::Tensor tmp_output;
11158 {
11159 at::AutoDispatchSkipFunctionalize guard;
11160 tmp_output = at::_ops::q_per_channel_scales::call(self_);
11161 }
11162 at::functionalization::impl::replace_(out, tmp_output);
11163 at::functionalization::impl::commit_update(out);
11164 at::functionalization::impl::sync(out);
11165 return out;
11166 }
11167 }
11168
11169 at::Tensor & int_repr_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
11170 if (false) {
11171 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
11172 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
11173       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11174 auto self_meta = to_meta(self);
11175 auto out_meta = to_meta(out);
11176 at::AutoDispatchSkipFunctionalize func_guard;
11177 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
11178 at::_ops::int_repr_out::call(self_meta, out_meta);
11179 }
11180
11181 at::Tensor self_;
11182 if (at::functionalization::impl::isFunctionalTensor(self)) {
11183 at::functionalization::impl::sync(self);
11184 self_ = at::functionalization::impl::from_functional_tensor(self);
11185 } else {
11186 self_ = self;
11187 }
11188
11189 at::Tensor out_;
11190 if (at::functionalization::impl::isFunctionalTensor(out)) {
11191 at::functionalization::impl::sync(out);
11192 out_ = at::functionalization::impl::from_functional_tensor(out);
11193 } else {
11194 out_ = out;
11195 }
11196 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
11197 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
11198         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11199 TORCH_INTERNAL_ASSERT(false,
11200 "mutating a non-functional tensor with a functional tensor is not allowed.",
11201 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
11202 } else {
11203 // case 2: arguments are not functional tensors, so we no-op and redispatch.
11204 at::AutoDispatchSkipFunctionalize guard;
11205 at::Tensor tmp_output = at::_ops::int_repr_out::call(self_, out_);
11206         return out;
11207 }
11208 } else {
11209 at::Tensor tmp_output;
11210 {
11211 at::AutoDispatchSkipFunctionalize guard;
11212 tmp_output = at::_ops::int_repr::call(self_);
11213 }
11214 at::functionalization::impl::replace_(out, tmp_output);
11215 at::functionalization::impl::commit_update(out);
11216 at::functionalization::impl::sync(out);
11217 return out;
11218 }
11219 }
11220
11221 at::Tensor & _make_per_tensor_quantized_tensor_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, at::Tensor & out) {
11222 if (false) {
11223 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
11224 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
11225       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11226 auto self_meta = to_meta(self);
11227 auto out_meta = to_meta(out);
11228 at::AutoDispatchSkipFunctionalize func_guard;
11229 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
11230 at::_ops::_make_per_tensor_quantized_tensor_out::call(self_meta, scale, zero_point, out_meta);
11231 }
11232
11233 at::Tensor self_;
11234 if (at::functionalization::impl::isFunctionalTensor(self)) {
11235 at::functionalization::impl::sync(self);
11236 self_ = at::functionalization::impl::from_functional_tensor(self);
11237 } else {
11238 self_ = self;
11239 }
11240
11241 at::Tensor out_;
11242 if (at::functionalization::impl::isFunctionalTensor(out)) {
11243 at::functionalization::impl::sync(out);
11244 out_ = at::functionalization::impl::from_functional_tensor(out);
11245 } else {
11246 out_ = out;
11247 }
11248 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
11249 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
11250         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11251 TORCH_INTERNAL_ASSERT(false,
11252 "mutating a non-functional tensor with a functional tensor is not allowed.",
11253 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
11254 } else {
11255 // case 2: arguments are not functional tensors, so we no-op and redispatch.
11256 at::AutoDispatchSkipFunctionalize guard;
11257 at::Tensor tmp_output = at::_ops::_make_per_tensor_quantized_tensor_out::call(self_, scale, zero_point, out_);
11258         return out;
11259 }
11260 } else {
11261 at::Tensor tmp_output;
11262 {
11263 at::AutoDispatchSkipFunctionalize guard;
11264 tmp_output = at::_ops::_make_per_tensor_quantized_tensor::call(self_, scale, zero_point);
11265 }
11266 at::functionalization::impl::replace_(out, tmp_output);
11267 at::functionalization::impl::commit_update(out);
11268 at::functionalization::impl::sync(out);
11269 return out;
11270 }
11271 }
11272
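// The multi-output wrappers below (e.g. _thnn_fused_lstm_cell_out_out) follow the same
// pattern as the single-output ones, but unwrap each of out0/out1/out2 individually,
// write each element of the functional op's returned tuple back with
// replace_/commit_update/sync, and return a tuple of Tensor references. Optional tensor
// inputs such as input_bias and hidden_bias are unwrapped through the
// c10::optional<at::Tensor> overloads of the same helpers.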
11273 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
11274 if (false) {
11275 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
11276 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
11277       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11278 auto input_gates_meta = to_meta(input_gates);
11279 auto hidden_gates_meta = to_meta(hidden_gates);
11280 auto cx_meta = to_meta(cx);
11281 auto input_bias_meta = to_meta(input_bias);
11282 auto hidden_bias_meta = to_meta(hidden_bias);
11283 auto out0_meta = to_meta(out0);
11284 auto out1_meta = to_meta(out1);
11285 auto out2_meta = to_meta(out2);
11286 at::AutoDispatchSkipFunctionalize func_guard;
11287 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
11288 at::_ops::_thnn_fused_lstm_cell_out::call(input_gates_meta, hidden_gates_meta, cx_meta, input_bias_meta, hidden_bias_meta, out0_meta, out1_meta, out2_meta);
11289 }
11290
11291 at::Tensor input_gates_;
11292 if (at::functionalization::impl::isFunctionalTensor(input_gates)) {
11293 at::functionalization::impl::sync(input_gates);
11294 input_gates_ = at::functionalization::impl::from_functional_tensor(input_gates);
11295 } else {
11296 input_gates_ = input_gates;
11297 }
11298
11299 at::Tensor hidden_gates_;
11300 if (at::functionalization::impl::isFunctionalTensor(hidden_gates)) {
11301 at::functionalization::impl::sync(hidden_gates);
11302 hidden_gates_ = at::functionalization::impl::from_functional_tensor(hidden_gates);
11303 } else {
11304 hidden_gates_ = hidden_gates;
11305 }
11306
11307 at::Tensor cx_;
11308 if (at::functionalization::impl::isFunctionalTensor(cx)) {
11309 at::functionalization::impl::sync(cx);
11310 cx_ = at::functionalization::impl::from_functional_tensor(cx);
11311 } else {
11312 cx_ = cx;
11313 }
11314
11315 c10::optional<at::Tensor> input_bias_;
11316 if (at::functionalization::impl::isFunctionalTensor(input_bias)) {
11317 at::functionalization::impl::sync(input_bias);
11318 input_bias_ = at::functionalization::impl::from_functional_tensor(input_bias);
11319 } else {
11320 input_bias_ = input_bias;
11321 }
11322
11323 c10::optional<at::Tensor> hidden_bias_;
11324 if (at::functionalization::impl::isFunctionalTensor(hidden_bias)) {
11325 at::functionalization::impl::sync(hidden_bias);
11326 hidden_bias_ = at::functionalization::impl::from_functional_tensor(hidden_bias);
11327 } else {
11328 hidden_bias_ = hidden_bias;
11329 }
11330
11331 at::Tensor out0_;
11332 if (at::functionalization::impl::isFunctionalTensor(out0)) {
11333 at::functionalization::impl::sync(out0);
11334 out0_ = at::functionalization::impl::from_functional_tensor(out0);
11335 } else {
11336 out0_ = out0;
11337 }
11338
11339 at::Tensor out1_;
11340 if (at::functionalization::impl::isFunctionalTensor(out1)) {
11341 at::functionalization::impl::sync(out1);
11342 out1_ = at::functionalization::impl::from_functional_tensor(out1);
11343 } else {
11344 out1_ = out1;
11345 }
11346
11347 at::Tensor out2_;
11348 if (at::functionalization::impl::isFunctionalTensor(out2)) {
11349 at::functionalization::impl::sync(out2);
11350 out2_ = at::functionalization::impl::from_functional_tensor(out2);
11351 } else {
11352 out2_ = out2;
11353 }
11354 if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
11355 if ((false || at::functionalization::impl::isFunctionalTensor(input_gates) || at::functionalization::impl::isFunctionalTensor(hidden_gates) || at::functionalization::impl::isFunctionalTensor(cx) || at::functionalization::impl::isFunctionalTensor(input_bias) || at::functionalization::impl::isFunctionalTensor(hidden_bias))) {
11356         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11357 TORCH_INTERNAL_ASSERT(false,
11358 "mutating a non-functional tensor with a functional tensor is not allowed.",
11359 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
11360 } else {
11361 // case 2: arguments are not functional tensors, so we no-op and redispatch.
11362 at::AutoDispatchSkipFunctionalize guard;
11363 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_thnn_fused_lstm_cell_out::call(input_gates_, hidden_gates_, cx_, input_bias_, hidden_bias_, out0_, out1_, out2_);
11364         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
11365 }
11366 } else {
11367 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
11368 {
11369 at::AutoDispatchSkipFunctionalize guard;
11370 tmp_output = at::_ops::_thnn_fused_lstm_cell::call(input_gates_, hidden_gates_, cx_, input_bias_, hidden_bias_);
11371 }
11372 at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
11373 at::functionalization::impl::commit_update(out0);
11374 at::functionalization::impl::sync(out0);
11375 at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
11376 at::functionalization::impl::commit_update(out1);
11377 at::functionalization::impl::sync(out1);
11378 at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
11379 at::functionalization::impl::commit_update(out2);
11380 at::functionalization::impl::sync(out2);
11381 return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
11382 }
11383 }
11384
11385 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_backward_impl_out_out(c10::DispatchKeySet dispatchKeySet, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
11386 if (false) {
11387 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
11388 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
11389       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11390 auto grad_hy_meta = to_meta(grad_hy);
11391 auto grad_cy_meta = to_meta(grad_cy);
11392 auto cx_meta = to_meta(cx);
11393 auto cy_meta = to_meta(cy);
11394 auto workspace_meta = to_meta(workspace);
11395 auto out0_meta = to_meta(out0);
11396 auto out1_meta = to_meta(out1);
11397 auto out2_meta = to_meta(out2);
11398 at::AutoDispatchSkipFunctionalize func_guard;
11399 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
11400 at::_ops::_thnn_fused_lstm_cell_backward_impl_out::call(grad_hy_meta, grad_cy_meta, cx_meta, cy_meta, workspace_meta, has_bias, out0_meta, out1_meta, out2_meta);
11401 }
11402
11403 c10::optional<at::Tensor> grad_hy_;
11404 if (at::functionalization::impl::isFunctionalTensor(grad_hy)) {
11405 at::functionalization::impl::sync(grad_hy);
11406 grad_hy_ = at::functionalization::impl::from_functional_tensor(grad_hy);
11407 } else {
11408 grad_hy_ = grad_hy;
11409 }
11410
11411 c10::optional<at::Tensor> grad_cy_;
11412 if (at::functionalization::impl::isFunctionalTensor(grad_cy)) {
11413 at::functionalization::impl::sync(grad_cy);
11414 grad_cy_ = at::functionalization::impl::from_functional_tensor(grad_cy);
11415 } else {
11416 grad_cy_ = grad_cy;
11417 }
11418
11419 at::Tensor cx_;
11420 if (at::functionalization::impl::isFunctionalTensor(cx)) {
11421 at::functionalization::impl::sync(cx);
11422 cx_ = at::functionalization::impl::from_functional_tensor(cx);
11423 } else {
11424 cx_ = cx;
11425 }
11426
11427 at::Tensor cy_;
11428 if (at::functionalization::impl::isFunctionalTensor(cy)) {
11429 at::functionalization::impl::sync(cy);
11430 cy_ = at::functionalization::impl::from_functional_tensor(cy);
11431 } else {
11432 cy_ = cy;
11433 }
11434
11435 at::Tensor workspace_;
11436 if (at::functionalization::impl::isFunctionalTensor(workspace)) {
11437 at::functionalization::impl::sync(workspace);
11438 workspace_ = at::functionalization::impl::from_functional_tensor(workspace);
11439 } else {
11440 workspace_ = workspace;
11441 }
11442
11443 at::Tensor out0_;
11444 if (at::functionalization::impl::isFunctionalTensor(out0)) {
11445 at::functionalization::impl::sync(out0);
11446 out0_ = at::functionalization::impl::from_functional_tensor(out0);
11447 } else {
11448 out0_ = out0;
11449 }
11450
11451 at::Tensor out1_;
11452 if (at::functionalization::impl::isFunctionalTensor(out1)) {
11453 at::functionalization::impl::sync(out1);
11454 out1_ = at::functionalization::impl::from_functional_tensor(out1);
11455 } else {
11456 out1_ = out1;
11457 }
11458
11459 at::Tensor out2_;
11460 if (at::functionalization::impl::isFunctionalTensor(out2)) {
11461 at::functionalization::impl::sync(out2);
11462 out2_ = at::functionalization::impl::from_functional_tensor(out2);
11463 } else {
11464 out2_ = out2;
11465 }
11466 if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
11467 if ((false || at::functionalization::impl::isFunctionalTensor(grad_hy) || at::functionalization::impl::isFunctionalTensor(grad_cy) || at::functionalization::impl::isFunctionalTensor(cx) || at::functionalization::impl::isFunctionalTensor(cy) || at::functionalization::impl::isFunctionalTensor(workspace))) {
11468         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11469 TORCH_INTERNAL_ASSERT(false,
11470 "mutating a non-functional tensor with a functional tensor is not allowed.",
11471 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
11472 } else {
11473 // case 2: arguments are not functional tensors, so we no-op and redispatch.
11474 at::AutoDispatchSkipFunctionalize guard;
11475 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_thnn_fused_lstm_cell_backward_impl_out::call(grad_hy_, grad_cy_, cx_, cy_, workspace_, has_bias, out0_, out1_, out2_);
11476         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
11477 }
11478 } else {
11479 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
11480 {
11481 at::AutoDispatchSkipFunctionalize guard;
11482 tmp_output = at::_ops::_thnn_fused_lstm_cell_backward_impl::call(grad_hy_, grad_cy_, cx_, cy_, workspace_, has_bias);
11483 }
11484 at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
11485 at::functionalization::impl::commit_update(out0);
11486 at::functionalization::impl::sync(out0);
11487 at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
11488 at::functionalization::impl::commit_update(out1);
11489 at::functionalization::impl::sync(out1);
11490 at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
11491 at::functionalization::impl::commit_update(out2);
11492 at::functionalization::impl::sync(out2);
11493 return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
11494 }
11495 }
11496
11497 at::Tensor & masked_fill_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value, at::Tensor & out) {
11498 if (false) {
11499 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
11500 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
11501       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11502 auto self_meta = to_meta(self);
11503 auto mask_meta = to_meta(mask);
11504 auto out_meta = to_meta(out);
11505 at::AutoDispatchSkipFunctionalize func_guard;
11506 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
11507 at::_ops::masked_fill_Scalar_out::call(self_meta, mask_meta, value, out_meta);
11508 }
11509
11510 at::Tensor self_;
11511 if (at::functionalization::impl::isFunctionalTensor(self)) {
11512 at::functionalization::impl::sync(self);
11513 self_ = at::functionalization::impl::from_functional_tensor(self);
11514 } else {
11515 self_ = self;
11516 }
11517
11518 at::Tensor mask_;
11519 if (at::functionalization::impl::isFunctionalTensor(mask)) {
11520 at::functionalization::impl::sync(mask);
11521 mask_ = at::functionalization::impl::from_functional_tensor(mask);
11522 } else {
11523 mask_ = mask;
11524 }
11525
11526 at::Tensor out_;
11527 if (at::functionalization::impl::isFunctionalTensor(out)) {
11528 at::functionalization::impl::sync(out);
11529 out_ = at::functionalization::impl::from_functional_tensor(out);
11530 } else {
11531 out_ = out;
11532 }
11533 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
11534 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mask))) {
11535         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11536 TORCH_INTERNAL_ASSERT(false,
11537 "mutating a non-functional tensor with a functional tensor is not allowed.",
11538 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
11539 } else {
11540 // case 2: arguments are not functional tensors, so we no-op and redispatch.
11541 at::AutoDispatchSkipFunctionalize guard;
11542 at::Tensor tmp_output = at::_ops::masked_fill_Scalar_out::call(self_, mask_, value, out_);
11543         return out;
11544 }
11545 } else {
11546 at::Tensor tmp_output;
11547 {
11548 at::AutoDispatchSkipFunctionalize guard;
11549 tmp_output = at::_ops::masked_fill_Scalar::call(self_, mask_, value);
11550 }
11551 at::functionalization::impl::replace_(out, tmp_output);
11552 at::functionalization::impl::commit_update(out);
11553 at::functionalization::impl::sync(out);
11554 return out;
11555 }
11556 }
11557
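// In-place variants (trailing underscore, e.g. masked_fill__Scalar below) differ from the
// out= wrappers in two ways: the meta-tensor shape check at the top is enabled
// (`if (true)` instead of `if (false)`), since in-place ops are expected to support meta
// tensors, and the mutation target is `self` rather than `out`, so the functional op's
// result is committed back into `self` and `self` is returned.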
11558 at::Tensor & masked_fill__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
11559 if (true) {
11560 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
11561 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
11562       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11563 auto self_meta = to_meta(self);
11564 auto mask_meta = to_meta(mask);
11565 at::AutoDispatchSkipFunctionalize func_guard;
11566 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
11567 at::_ops::masked_fill__Scalar::call(self_meta, mask_meta, value);
11568 }
11569
11570 at::Tensor self_;
11571 if (at::functionalization::impl::isFunctionalTensor(self)) {
11572 at::functionalization::impl::sync(self);
11573 self_ = at::functionalization::impl::from_functional_tensor(self);
11574 } else {
11575 self_ = self;
11576 }
11577
11578 at::Tensor mask_;
11579 if (at::functionalization::impl::isFunctionalTensor(mask)) {
11580 at::functionalization::impl::sync(mask);
11581 mask_ = at::functionalization::impl::from_functional_tensor(mask);
11582 } else {
11583 mask_ = mask;
11584 }
11585 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
11586 if ((false || at::functionalization::impl::isFunctionalTensor(mask))) {
11587         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11588 TORCH_INTERNAL_ASSERT(false,
11589 "mutating a non-functional tensor with a functional tensor is not allowed.",
11590 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
11591 } else {
11592 // case 2: arguments are not functional tensors, so we no-op and redispatch.
11593 at::AutoDispatchSkipFunctionalize guard;
11594 at::Tensor tmp_output = at::_ops::masked_fill__Scalar::call(self_, mask_, value);
11595         return self;
11596 }
11597 } else {
11598 at::Tensor tmp_output;
11599 {
11600 at::AutoDispatchSkipFunctionalize guard;
11601 tmp_output = at::_ops::masked_fill_Scalar::call(self_, mask_, value);
11602 }
11603 at::functionalization::impl::replace_(self, tmp_output);
11604 at::functionalization::impl::commit_update(self);
11605 at::functionalization::impl::sync(self);
11606 return self;
11607 }
11608 }
11609
11610 at::Tensor & masked_fill_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value, at::Tensor & out) {
11611 if (false) {
11612 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
11613 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
11614       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11615 auto self_meta = to_meta(self);
11616 auto mask_meta = to_meta(mask);
11617 auto value_meta = to_meta(value);
11618 auto out_meta = to_meta(out);
11619 at::AutoDispatchSkipFunctionalize func_guard;
11620 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
11621 at::_ops::masked_fill_Tensor_out::call(self_meta, mask_meta, value_meta, out_meta);
11622 }
11623
11624 at::Tensor self_;
11625 if (at::functionalization::impl::isFunctionalTensor(self)) {
11626 at::functionalization::impl::sync(self);
11627 self_ = at::functionalization::impl::from_functional_tensor(self);
11628 } else {
11629 self_ = self;
11630 }
11631
11632 at::Tensor mask_;
11633 if (at::functionalization::impl::isFunctionalTensor(mask)) {
11634 at::functionalization::impl::sync(mask);
11635 mask_ = at::functionalization::impl::from_functional_tensor(mask);
11636 } else {
11637 mask_ = mask;
11638 }
11639
11640 at::Tensor value_;
11641 if (at::functionalization::impl::isFunctionalTensor(value)) {
11642 at::functionalization::impl::sync(value);
11643 value_ = at::functionalization::impl::from_functional_tensor(value);
11644 } else {
11645 value_ = value;
11646 }
11647
11648 at::Tensor out_;
11649 if (at::functionalization::impl::isFunctionalTensor(out)) {
11650 at::functionalization::impl::sync(out);
11651 out_ = at::functionalization::impl::from_functional_tensor(out);
11652 } else {
11653 out_ = out;
11654 }
11655 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
11656 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mask) || at::functionalization::impl::isFunctionalTensor(value))) {
11657         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11658 TORCH_INTERNAL_ASSERT(false,
11659 "mutating a non-functional tensor with a functional tensor is not allowed.",
11660 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
11661 } else {
11662 // case 2: arguments are not functional tensors, so we no-op and redispatch.
11663 at::AutoDispatchSkipFunctionalize guard;
11664 at::Tensor tmp_output = at::_ops::masked_fill_Tensor_out::call(self_, mask_, value_, out_);
11665         return out;
11666 }
11667 } else {
11668 at::Tensor tmp_output;
11669 {
11670 at::AutoDispatchSkipFunctionalize guard;
11671 tmp_output = at::_ops::masked_fill_Tensor::call(self_, mask_, value_);
11672 }
11673 at::functionalization::impl::replace_(out, tmp_output);
11674 at::functionalization::impl::commit_update(out);
11675 at::functionalization::impl::sync(out);
11676 return out;
11677 }
11678 }
11679
11680 at::Tensor & masked_fill__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
11681 if (true) {
11682 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
11683 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
11684       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11685 auto self_meta = to_meta(self);
11686 auto mask_meta = to_meta(mask);
11687 auto value_meta = to_meta(value);
11688 at::AutoDispatchSkipFunctionalize func_guard;
11689 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
11690 at::_ops::masked_fill__Tensor::call(self_meta, mask_meta, value_meta);
11691 }
11692
11693 at::Tensor self_;
11694 if (at::functionalization::impl::isFunctionalTensor(self)) {
11695 at::functionalization::impl::sync(self);
11696 self_ = at::functionalization::impl::from_functional_tensor(self);
11697 } else {
11698 self_ = self;
11699 }
11700
11701 at::Tensor mask_;
11702 if (at::functionalization::impl::isFunctionalTensor(mask)) {
11703 at::functionalization::impl::sync(mask);
11704 mask_ = at::functionalization::impl::from_functional_tensor(mask);
11705 } else {
11706 mask_ = mask;
11707 }
11708
11709 at::Tensor value_;
11710 if (at::functionalization::impl::isFunctionalTensor(value)) {
11711 at::functionalization::impl::sync(value);
11712 value_ = at::functionalization::impl::from_functional_tensor(value);
11713 } else {
11714 value_ = value;
11715 }
11716 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
11717 if ((false || at::functionalization::impl::isFunctionalTensor(mask) || at::functionalization::impl::isFunctionalTensor(value))) {
11718         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11719 TORCH_INTERNAL_ASSERT(false,
11720 "mutating a non-functional tensor with a functional tensor is not allowed.",
11721 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
11722 } else {
11723 // case 2: arguments are not functional tensors, so we no-op and redispatch.
11724 at::AutoDispatchSkipFunctionalize guard;
11725 at::Tensor tmp_output = at::_ops::masked_fill__Tensor::call(self_, mask_, value_);
11726         return self;
11727 }
11728 } else {
11729 at::Tensor tmp_output;
11730 {
11731 at::AutoDispatchSkipFunctionalize guard;
11732 tmp_output = at::_ops::masked_fill_Tensor::call(self_, mask_, value_);
11733 }
11734 at::functionalization::impl::replace_(self, tmp_output);
11735 at::functionalization::impl::commit_update(self);
11736 at::functionalization::impl::sync(self);
11737 return self;
11738 }
11739 }
11740
11741 at::Tensor & _masked_softmax_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, c10::optional<int64_t> dim, c10::optional<int64_t> mask_type, at::Tensor & out) {
11742 if (false) {
11743 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
11744 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
11745       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11746 auto self_meta = to_meta(self);
11747 auto mask_meta = to_meta(mask);
11748 auto out_meta = to_meta(out);
11749 at::AutoDispatchSkipFunctionalize func_guard;
11750 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
11751 at::_ops::_masked_softmax_out::call(self_meta, mask_meta, dim, mask_type, out_meta);
11752 }
11753
11754 at::Tensor self_;
11755 if (at::functionalization::impl::isFunctionalTensor(self)) {
11756 at::functionalization::impl::sync(self);
11757 self_ = at::functionalization::impl::from_functional_tensor(self);
11758 } else {
11759 self_ = self;
11760 }
11761
11762 at::Tensor mask_;
11763 if (at::functionalization::impl::isFunctionalTensor(mask)) {
11764 at::functionalization::impl::sync(mask);
11765 mask_ = at::functionalization::impl::from_functional_tensor(mask);
11766 } else {
11767 mask_ = mask;
11768 }
11769
11770 at::Tensor out_;
11771 if (at::functionalization::impl::isFunctionalTensor(out)) {
11772 at::functionalization::impl::sync(out);
11773 out_ = at::functionalization::impl::from_functional_tensor(out);
11774 } else {
11775 out_ = out;
11776 }
11777 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
11778 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mask))) {
11779         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11780 TORCH_INTERNAL_ASSERT(false,
11781 "mutating a non-functional tensor with a functional tensor is not allowed.",
11782 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
11783 } else {
11784 // case 2: arguments are not functional tensors, so we no-op and redispatch.
11785 at::AutoDispatchSkipFunctionalize guard;
11786 at::Tensor tmp_output = at::_ops::_masked_softmax_out::call(self_, mask_, dim, mask_type, out_);
11787         return out;
11788 }
11789 } else {
11790 at::Tensor tmp_output;
11791 {
11792 at::AutoDispatchSkipFunctionalize guard;
11793 tmp_output = at::_ops::_masked_softmax::call(self_, mask_, dim, mask_type);
11794 }
11795 at::functionalization::impl::replace_(out, tmp_output);
11796 at::functionalization::impl::commit_update(out);
11797 at::functionalization::impl::sync(out);
11798 return out;
11799 }
11800 }
11801
11802 at::Tensor & bitwise_right_shift_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
11803 if (false) {
11804 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
11805 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
11806       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11807 auto self_meta = to_meta(self);
11808 auto other_meta = to_meta(other);
11809 auto out_meta = to_meta(out);
11810 at::AutoDispatchSkipFunctionalize func_guard;
11811 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
11812 at::_ops::bitwise_right_shift_Tensor_out::call(self_meta, other_meta, out_meta);
11813 }
11814
11815 at::Tensor self_;
11816 if (at::functionalization::impl::isFunctionalTensor(self)) {
11817 at::functionalization::impl::sync(self);
11818 self_ = at::functionalization::impl::from_functional_tensor(self);
11819 } else {
11820 self_ = self;
11821 }
11822
11823 at::Tensor other_;
11824 if (at::functionalization::impl::isFunctionalTensor(other)) {
11825 at::functionalization::impl::sync(other);
11826 other_ = at::functionalization::impl::from_functional_tensor(other);
11827 } else {
11828 other_ = other;
11829 }
11830
11831 at::Tensor out_;
11832 if (at::functionalization::impl::isFunctionalTensor(out)) {
11833 at::functionalization::impl::sync(out);
11834 out_ = at::functionalization::impl::from_functional_tensor(out);
11835 } else {
11836 out_ = out;
11837 }
11838 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
11839 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
11840         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11841 TORCH_INTERNAL_ASSERT(false,
11842 "mutating a non-functional tensor with a functional tensor is not allowed.",
11843 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
11844 } else {
11845 // case 2: arguments are not functional tensors, so we no-op and redispatch.
11846 at::AutoDispatchSkipFunctionalize guard;
11847 at::Tensor tmp_output = at::_ops::bitwise_right_shift_Tensor_out::call(self_, other_, out_);
11848         return out;
11849 }
11850 } else {
11851 at::Tensor tmp_output;
11852 {
11853 at::AutoDispatchSkipFunctionalize guard;
11854 tmp_output = at::_ops::bitwise_right_shift_Tensor::call(self_, other_);
11855 }
11856 at::functionalization::impl::replace_(out, tmp_output);
11857 at::functionalization::impl::commit_update(out);
11858 at::functionalization::impl::sync(out);
11859 return out;
11860 }
11861 }
11862
11863 at::Tensor & bitwise_right_shift__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
11864 if (true) {
11865 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
11866 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
11867       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11868 auto self_meta = to_meta(self);
11869 auto other_meta = to_meta(other);
11870 at::AutoDispatchSkipFunctionalize func_guard;
11871 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
11872 at::_ops::bitwise_right_shift__Tensor::call(self_meta, other_meta);
11873 }
11874
11875 at::Tensor self_;
11876 if (at::functionalization::impl::isFunctionalTensor(self)) {
11877 at::functionalization::impl::sync(self);
11878 self_ = at::functionalization::impl::from_functional_tensor(self);
11879 } else {
11880 self_ = self;
11881 }
11882
11883 at::Tensor other_;
11884 if (at::functionalization::impl::isFunctionalTensor(other)) {
11885 at::functionalization::impl::sync(other);
11886 other_ = at::functionalization::impl::from_functional_tensor(other);
11887 } else {
11888 other_ = other;
11889 }
11890 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
11891 if ((false || at::functionalization::impl::isFunctionalTensor(other))) {
11892         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11893 TORCH_INTERNAL_ASSERT(false,
11894 "mutating a non-functional tensor with a functional tensor is not allowed.",
11895 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
11896 } else {
11897 // case 2: arguments are not functional tensors, so we no-op and redispatch.
11898 at::AutoDispatchSkipFunctionalize guard;
11899 at::Tensor tmp_output = at::_ops::bitwise_right_shift__Tensor::call(self_, other_);
11900         return self;
11901 }
11902 } else {
11903 at::Tensor tmp_output;
11904 {
11905 at::AutoDispatchSkipFunctionalize guard;
11906 tmp_output = at::_ops::bitwise_right_shift_Tensor::call(self_, other_);
11907 }
11908 at::functionalization::impl::replace_(self, tmp_output);
11909 at::functionalization::impl::commit_update(self);
11910 at::functionalization::impl::sync(self);
11911 return self;
11912 }
11913 }
11914
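// For the Scalar overloads below, only tensor arguments participate in functionalization:
// a Scalar `other` (or Scalar `self`) is forwarded untouched, and when the only tensor
// argument is the one being mutated, the "case 1" check degenerates to `if ((false))`,
// i.e. dead code.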
11915 at::Tensor & bitwise_right_shift_out_Tensor_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
11916 if (false) {
11917 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
11918 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
11919       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11920 auto self_meta = to_meta(self);
11921 auto out_meta = to_meta(out);
11922 at::AutoDispatchSkipFunctionalize func_guard;
11923 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
11924 at::_ops::bitwise_right_shift_Tensor_Scalar_out::call(self_meta, other, out_meta);
11925 }
11926
11927 at::Tensor self_;
11928 if (at::functionalization::impl::isFunctionalTensor(self)) {
11929 at::functionalization::impl::sync(self);
11930 self_ = at::functionalization::impl::from_functional_tensor(self);
11931 } else {
11932 self_ = self;
11933 }
11934
11935 at::Tensor out_;
11936 if (at::functionalization::impl::isFunctionalTensor(out)) {
11937 at::functionalization::impl::sync(out);
11938 out_ = at::functionalization::impl::from_functional_tensor(out);
11939 } else {
11940 out_ = out;
11941 }
11942 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
11943 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
11944         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11945 TORCH_INTERNAL_ASSERT(false,
11946 "mutating a non-functional tensor with a functional tensor is not allowed.",
11947 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
11948 } else {
11949 // case 2: arguments are not functional tensors, so we no-op and redispatch.
11950 at::AutoDispatchSkipFunctionalize guard;
11951 at::Tensor tmp_output = at::_ops::bitwise_right_shift_Tensor_Scalar_out::call(self_, other, out_);
11952         return out;
11953 }
11954 } else {
11955 at::Tensor tmp_output;
11956 {
11957 at::AutoDispatchSkipFunctionalize guard;
11958 tmp_output = at::_ops::bitwise_right_shift_Tensor_Scalar::call(self_, other);
11959 }
11960 at::functionalization::impl::replace_(out, tmp_output);
11961 at::functionalization::impl::commit_update(out);
11962 at::functionalization::impl::sync(out);
11963 return out;
11964 }
11965 }
11966
11967 at::Tensor & bitwise_right_shift__Tensor_Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
11968 if (true) {
11969 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
11970 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
11971       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11972 auto self_meta = to_meta(self);
11973 at::AutoDispatchSkipFunctionalize func_guard;
11974 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
11975 at::_ops::bitwise_right_shift__Tensor_Scalar::call(self_meta, other);
11976 }
11977
11978 at::Tensor self_;
11979 if (at::functionalization::impl::isFunctionalTensor(self)) {
11980 at::functionalization::impl::sync(self);
11981 self_ = at::functionalization::impl::from_functional_tensor(self);
11982 } else {
11983 self_ = self;
11984 }
11985 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
11986 if ((false)) {
11987         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11988 TORCH_INTERNAL_ASSERT(false,
11989 "mutating a non-functional tensor with a functional tensor is not allowed.",
11990 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
11991 } else {
11992 // case 2: arguments are not functional tensors, so we no-op and redispatch.
11993 at::AutoDispatchSkipFunctionalize guard;
11994 at::Tensor tmp_output = at::_ops::bitwise_right_shift__Tensor_Scalar::call(self_, other);
11995         return self;
11996 }
11997 } else {
11998 at::Tensor tmp_output;
11999 {
12000 at::AutoDispatchSkipFunctionalize guard;
12001 tmp_output = at::_ops::bitwise_right_shift_Tensor_Scalar::call(self_, other);
12002 }
12003 at::functionalization::impl::replace_(self, tmp_output);
12004 at::functionalization::impl::commit_update(self);
12005 at::functionalization::impl::sync(self);
12006 return self;
12007 }
12008 }
12009
12010 at::Tensor & bitwise_right_shift_out_Scalar_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
12011 if (false) {
12012 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
12013 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
12014       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12015 auto other_meta = to_meta(other);
12016 auto out_meta = to_meta(out);
12017 at::AutoDispatchSkipFunctionalize func_guard;
12018 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
12019 at::_ops::bitwise_right_shift_Scalar_Tensor_out::call(self, other_meta, out_meta);
12020 }
12021
12022 at::Tensor other_;
12023 if (at::functionalization::impl::isFunctionalTensor(other)) {
12024 at::functionalization::impl::sync(other);
12025 other_ = at::functionalization::impl::from_functional_tensor(other);
12026 } else {
12027 other_ = other;
12028 }
12029
12030 at::Tensor out_;
12031 if (at::functionalization::impl::isFunctionalTensor(out)) {
12032 at::functionalization::impl::sync(out);
12033 out_ = at::functionalization::impl::from_functional_tensor(out);
12034 } else {
12035 out_ = out;
12036 }
12037 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
12038 if ((false || at::functionalization::impl::isFunctionalTensor(other))) {
12039         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
12040 TORCH_INTERNAL_ASSERT(false,
12041 "mutating a non-functional tensor with a functional tensor is not allowed.",
12042 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
12043 } else {
12044 // case 2: arguments are not functional tensors, so we no-op and redispatch.
12045 at::AutoDispatchSkipFunctionalize guard;
12046 at::Tensor tmp_output = at::_ops::bitwise_right_shift_Scalar_Tensor_out::call(self, other_, out_);
12047         return out;
12048 }
12049 } else {
12050 at::Tensor tmp_output;
12051 {
12052 at::AutoDispatchSkipFunctionalize guard;
12053 tmp_output = at::_ops::bitwise_right_shift_Scalar_Tensor::call(self, other_);
12054 }
12055 at::functionalization::impl::replace_(out, tmp_output);
12056 at::functionalization::impl::commit_update(out);
12057 at::functionalization::impl::sync(out);
12058 return out;
12059 }
12060 }
12061
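// The random-sampling wrappers below (cauchy / log_normal) forward the optional
// at::Generator unchanged to whichever kernel is ultimately redispatched, so seeding and
// RNG state are handled entirely by the underlying op rather than by functionalization.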
12062 at::Tensor & cauchy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double median, double sigma, c10::optional<at::Generator> generator, at::Tensor & out) {
12063 if (false) {
12064 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
12065 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
12066       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12067 auto self_meta = to_meta(self);
12068 auto out_meta = to_meta(out);
12069 at::AutoDispatchSkipFunctionalize func_guard;
12070 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
12071 at::_ops::cauchy_out::call(self_meta, median, sigma, generator, out_meta);
12072 }
12073
12074 at::Tensor self_;
12075 if (at::functionalization::impl::isFunctionalTensor(self)) {
12076 at::functionalization::impl::sync(self);
12077 self_ = at::functionalization::impl::from_functional_tensor(self);
12078 } else {
12079 self_ = self;
12080 }
12081
12082 at::Tensor out_;
12083 if (at::functionalization::impl::isFunctionalTensor(out)) {
12084 at::functionalization::impl::sync(out);
12085 out_ = at::functionalization::impl::from_functional_tensor(out);
12086 } else {
12087 out_ = out;
12088 }
12089 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
12090 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
12091         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
12092 TORCH_INTERNAL_ASSERT(false,
12093 "mutating a non-functional tensor with a functional tensor is not allowed.",
12094 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
12095 } else {
12096 // case 2: arguments are not functional tensors, so we no-op and redispatch.
12097 at::AutoDispatchSkipFunctionalize guard;
12098 at::Tensor tmp_output = at::_ops::cauchy_out::call(self_, median, sigma, generator, out_);
12099         return out;
12100 }
12101 } else {
12102 at::Tensor tmp_output;
12103 {
12104 at::AutoDispatchSkipFunctionalize guard;
12105 tmp_output = at::_ops::cauchy::call(self_, median, sigma, generator);
12106 }
12107 at::functionalization::impl::replace_(out, tmp_output);
12108 at::functionalization::impl::commit_update(out);
12109 at::functionalization::impl::sync(out);
12110 return out;
12111 }
12112 }
12113
12114 at::Tensor & cauchy_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double median, double sigma, c10::optional<at::Generator> generator) {
12115 if (true) {
12116 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
12117 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
12118       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12119 auto self_meta = to_meta(self);
12120 at::AutoDispatchSkipFunctionalize func_guard;
12121 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
12122 at::_ops::cauchy_::call(self_meta, median, sigma, generator);
12123 }
12124
12125 at::Tensor self_;
12126 if (at::functionalization::impl::isFunctionalTensor(self)) {
12127 at::functionalization::impl::sync(self);
12128 self_ = at::functionalization::impl::from_functional_tensor(self);
12129 } else {
12130 self_ = self;
12131 }
12132 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
12133 if ((false)) {
12134         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
12135 TORCH_INTERNAL_ASSERT(false,
12136 "mutating a non-functional tensor with a functional tensor is not allowed.",
12137 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
12138 } else {
12139 // case 2: arguments are not functional tensors, so we no-op and redispatch.
12140 at::AutoDispatchSkipFunctionalize guard;
12141 at::Tensor tmp_output = at::_ops::cauchy_::call(self_, median, sigma, generator);
12142         return self;
12143 }
12144 } else {
12145 at::Tensor tmp_output;
12146 {
12147 at::AutoDispatchSkipFunctionalize guard;
12148 tmp_output = at::_ops::cauchy::call(self_, median, sigma, generator);
12149 }
12150 at::functionalization::impl::replace_(self, tmp_output);
12151 at::functionalization::impl::commit_update(self);
12152 at::functionalization::impl::sync(self);
12153 return self;
12154 }
12155 }
12156
12157 at::Tensor & log_normal_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator, at::Tensor & out) {
12158 if (false) {
12159 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
12160       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12161 // (We can only do this for inplace ops today though, because they technicaly all support meta tensors).
12162 auto self_meta = to_meta(self);
12163 auto out_meta = to_meta(out);
12164 at::AutoDispatchSkipFunctionalize func_guard;
12165 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
12166 at::_ops::log_normal_out::call(self_meta, mean, std, generator, out_meta);
12167 }
12168
12169 at::Tensor self_;
12170 if (at::functionalization::impl::isFunctionalTensor(self)) {
12171 at::functionalization::impl::sync(self);
12172 self_ = at::functionalization::impl::from_functional_tensor(self);
12173 } else {
12174 self_ = self;
12175 }
12176
12177 at::Tensor out_;
12178 if (at::functionalization::impl::isFunctionalTensor(out)) {
12179 at::functionalization::impl::sync(out);
12180 out_ = at::functionalization::impl::from_functional_tensor(out);
12181 } else {
12182 out_ = out;
12183 }
12184 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
12185 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
12186 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
12187 TORCH_INTERNAL_ASSERT(false,
12188 "mutating a non-functional tensor with a functional tensor is not allowed.",
12189 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
12190 } else {
12191 // case 2: arguments are not functional tensors, so we no-op and redispatch.
12192 at::AutoDispatchSkipFunctionalize guard;
12193 at::Tensor tmp_output = at::_ops::log_normal_out::call(self_, mean, std, generator, out_);
12194         return out;
12195 }
12196 } else {
12197 at::Tensor tmp_output;
12198 {
12199 at::AutoDispatchSkipFunctionalize guard;
12200 tmp_output = at::_ops::log_normal::call(self_, mean, std, generator);
12201 }
12202 at::functionalization::impl::replace_(out, tmp_output);
12203 at::functionalization::impl::commit_update(out);
12204 at::functionalization::impl::sync(out);
12205 return out;
12206 }
12207 }
12208
12209 at::Tensor & log_normal_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
12210 if (true) {
12211 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
12212       // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
12213       // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
12214 auto self_meta = to_meta(self);
12215 at::AutoDispatchSkipFunctionalize func_guard;
12216 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
12217 at::_ops::log_normal_::call(self_meta, mean, std, generator);
12218 }
12219
12220 at::Tensor self_;
12221 if (at::functionalization::impl::isFunctionalTensor(self)) {
12222 at::functionalization::impl::sync(self);
12223 self_ = at::functionalization::impl::from_functional_tensor(self);
12224 } else {
12225 self_ = self;
12226 }
12227 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
12228 if ((false)) {
12229 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
12230 TORCH_INTERNAL_ASSERT(false,
12231 "mutating a non-functional tensor with a functional tensor is not allowed.",
12232 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
12233 } else {
12234 // case 2: arguments are not functional tensors, so we no-op and redispatch.
12235 at::AutoDispatchSkipFunctionalize guard;
12236 at::Tensor tmp_output = at::_ops::log_normal_::call(self_, mean, std, generator);
12237         return self;
12238 }
12239 } else {
12240 at::Tensor tmp_output;
12241 {
12242 at::AutoDispatchSkipFunctionalize guard;
12243 tmp_output = at::_ops::log_normal::call(self_, mean, std, generator);
12244 }
12245 at::functionalization::impl::replace_(self, tmp_output);
12246 at::functionalization::impl::commit_update(self);
12247 at::functionalization::impl::sync(self);
12248 return self;
12249 }
12250 }
12251
12252 at::Tensor & diag_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal, at::Tensor & out) {
12253 if (false) {
12254 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
12255       // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
12256       // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
12257 auto self_meta = to_meta(self);
12258 auto out_meta = to_meta(out);
12259 at::AutoDispatchSkipFunctionalize func_guard;
12260 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
12261 at::_ops::diag_out::call(self_meta, diagonal, out_meta);
12262 }
12263
12264 at::Tensor self_;
12265 if (at::functionalization::impl::isFunctionalTensor(self)) {
12266 at::functionalization::impl::sync(self);
12267 self_ = at::functionalization::impl::from_functional_tensor(self);
12268 } else {
12269 self_ = self;
12270 }
12271
12272 at::Tensor out_;
12273 if (at::functionalization::impl::isFunctionalTensor(out)) {
12274 at::functionalization::impl::sync(out);
12275 out_ = at::functionalization::impl::from_functional_tensor(out);
12276 } else {
12277 out_ = out;
12278 }
12279 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
12280 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
12281 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
12282 TORCH_INTERNAL_ASSERT(false,
12283 "mutating a non-functional tensor with a functional tensor is not allowed.",
12284 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
12285 } else {
12286 // case 2: arguments are not functional tensors, so we no-op and redispatch.
12287 at::AutoDispatchSkipFunctionalize guard;
12288 at::Tensor tmp_output = at::_ops::diag_out::call(self_, diagonal, out_);
12289         return out;
12290 }
12291 } else {
12292 at::Tensor tmp_output;
12293 {
12294 at::AutoDispatchSkipFunctionalize guard;
12295 tmp_output = at::_ops::diag::call(self_, diagonal);
12296 }
12297 at::functionalization::impl::replace_(out, tmp_output);
12298 at::functionalization::impl::commit_update(out);
12299 at::functionalization::impl::sync(out);
12300 return out;
12301 }
12302 }
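    // In the out= kernels such as diag_out_out above, the generated conditions are seeded with the
    // constants `true &&` / `false ||` and then extended with one isFunctionalTensor() check per
    // output (respectively per read-only input). When `out` is functional, the pure op (at::_ops::diag
    // here) runs under AutoDispatchSkipFunctionalize and its result is committed back into `out` via
    // replace_/commit_update/sync; conceptually, diag_out(self, diagonal, out) becomes
    // out <- diag(self, diagonal). When nothing is functional, the original out= op is redispatched
    // unchanged.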
12303
12304 at::Tensor & ne_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
12305 if (false) {
12306 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
12307       // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
12308       // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
12309 auto self_meta = to_meta(self);
12310 auto out_meta = to_meta(out);
12311 at::AutoDispatchSkipFunctionalize func_guard;
12312 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
12313 at::_ops::ne_Scalar_out::call(self_meta, other, out_meta);
12314 }
12315
12316 at::Tensor self_;
12317 if (at::functionalization::impl::isFunctionalTensor(self)) {
12318 at::functionalization::impl::sync(self);
12319 self_ = at::functionalization::impl::from_functional_tensor(self);
12320 } else {
12321 self_ = self;
12322 }
12323
12324 at::Tensor out_;
12325 if (at::functionalization::impl::isFunctionalTensor(out)) {
12326 at::functionalization::impl::sync(out);
12327 out_ = at::functionalization::impl::from_functional_tensor(out);
12328 } else {
12329 out_ = out;
12330 }
12331 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
12332 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
12333 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
12334 TORCH_INTERNAL_ASSERT(false,
12335 "mutating a non-functional tensor with a functional tensor is not allowed.",
12336 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
12337 } else {
12338 // case 2: arguments are not functional tensors, so we no-op and redispatch.
12339 at::AutoDispatchSkipFunctionalize guard;
12340 at::Tensor tmp_output = at::_ops::ne_Scalar_out::call(self_, other, out_);
12341         return out;
12342 }
12343 } else {
12344 at::Tensor tmp_output;
12345 {
12346 at::AutoDispatchSkipFunctionalize guard;
12347 tmp_output = at::_ops::ne_Scalar::call(self_, other);
12348 }
12349 at::functionalization::impl::replace_(out, tmp_output);
12350 at::functionalization::impl::commit_update(out);
12351 at::functionalization::impl::sync(out);
12352 return out;
12353 }
12354 }
12355
12356 at::Tensor & ne__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
12357 if (true) {
12358 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
12359       // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
12360       // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
12361 auto self_meta = to_meta(self);
12362 at::AutoDispatchSkipFunctionalize func_guard;
12363 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
12364 at::_ops::ne__Scalar::call(self_meta, other);
12365 }
12366
12367 at::Tensor self_;
12368 if (at::functionalization::impl::isFunctionalTensor(self)) {
12369 at::functionalization::impl::sync(self);
12370 self_ = at::functionalization::impl::from_functional_tensor(self);
12371 } else {
12372 self_ = self;
12373 }
12374 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
12375 if ((false)) {
12376 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
12377 TORCH_INTERNAL_ASSERT(false,
12378 "mutating a non-functional tensor with a functional tensor is not allowed.",
12379 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
12380 } else {
12381 // case 2: arguments are not functional tensors, so we no-op and redispatch.
12382 at::AutoDispatchSkipFunctionalize guard;
12383 at::Tensor tmp_output = at::_ops::ne__Scalar::call(self_, other);
12384         return self;
12385 }
12386 } else {
12387 at::Tensor tmp_output;
12388 {
12389 at::AutoDispatchSkipFunctionalize guard;
12390 tmp_output = at::_ops::ne_Scalar::call(self_, other);
12391 }
12392 at::functionalization::impl::replace_(self, tmp_output);
12393 at::functionalization::impl::commit_update(self);
12394 at::functionalization::impl::sync(self);
12395 return self;
12396 }
12397 }
12398
12399 at::Tensor & ne_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
12400 if (false) {
12401 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
12402       // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
12403       // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
12404 auto self_meta = to_meta(self);
12405 auto other_meta = to_meta(other);
12406 auto out_meta = to_meta(out);
12407 at::AutoDispatchSkipFunctionalize func_guard;
12408 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
12409 at::_ops::ne_Tensor_out::call(self_meta, other_meta, out_meta);
12410 }
12411
12412 at::Tensor self_;
12413 if (at::functionalization::impl::isFunctionalTensor(self)) {
12414 at::functionalization::impl::sync(self);
12415 self_ = at::functionalization::impl::from_functional_tensor(self);
12416 } else {
12417 self_ = self;
12418 }
12419
12420 at::Tensor other_;
12421 if (at::functionalization::impl::isFunctionalTensor(other)) {
12422 at::functionalization::impl::sync(other);
12423 other_ = at::functionalization::impl::from_functional_tensor(other);
12424 } else {
12425 other_ = other;
12426 }
12427
12428 at::Tensor out_;
12429 if (at::functionalization::impl::isFunctionalTensor(out)) {
12430 at::functionalization::impl::sync(out);
12431 out_ = at::functionalization::impl::from_functional_tensor(out);
12432 } else {
12433 out_ = out;
12434 }
12435 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
12436 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
12437 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
12438 TORCH_INTERNAL_ASSERT(false,
12439 "mutating a non-functional tensor with a functional tensor is not allowed.",
12440 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
12441 } else {
12442 // case 2: arguments are not functional tensors, so we no-op and redispatch.
12443 at::AutoDispatchSkipFunctionalize guard;
12444 at::Tensor tmp_output = at::_ops::ne_Tensor_out::call(self_, other_, out_);
12445         return out;
12446 }
12447 } else {
12448 at::Tensor tmp_output;
12449 {
12450 at::AutoDispatchSkipFunctionalize guard;
12451 tmp_output = at::_ops::ne_Tensor::call(self_, other_);
12452 }
12453 at::functionalization::impl::replace_(out, tmp_output);
12454 at::functionalization::impl::commit_update(out);
12455 at::functionalization::impl::sync(out);
12456 return out;
12457 }
12458 }
12459
12460 at::Tensor & ne__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
12461 if (true) {
12462 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
12463       // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
12464       // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
12465 auto self_meta = to_meta(self);
12466 auto other_meta = to_meta(other);
12467 at::AutoDispatchSkipFunctionalize func_guard;
12468 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
12469 at::_ops::ne__Tensor::call(self_meta, other_meta);
12470 }
12471
12472 at::Tensor self_;
12473 if (at::functionalization::impl::isFunctionalTensor(self)) {
12474 at::functionalization::impl::sync(self);
12475 self_ = at::functionalization::impl::from_functional_tensor(self);
12476 } else {
12477 self_ = self;
12478 }
12479
12480 at::Tensor other_;
12481 if (at::functionalization::impl::isFunctionalTensor(other)) {
12482 at::functionalization::impl::sync(other);
12483 other_ = at::functionalization::impl::from_functional_tensor(other);
12484 } else {
12485 other_ = other;
12486 }
12487 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
12488 if ((false || at::functionalization::impl::isFunctionalTensor(other))) {
12489 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
12490 TORCH_INTERNAL_ASSERT(false,
12491 "mutating a non-functional tensor with a functional tensor is not allowed.",
12492 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
12493 } else {
12494 // case 2: arguments are not functional tensors, so we no-op and redispatch.
12495 at::AutoDispatchSkipFunctionalize guard;
12496 at::Tensor tmp_output = at::_ops::ne__Tensor::call(self_, other_);
12497         return self;
12498 }
12499 } else {
12500 at::Tensor tmp_output;
12501 {
12502 at::AutoDispatchSkipFunctionalize guard;
12503 tmp_output = at::_ops::ne_Tensor::call(self_, other_);
12504 }
12505 at::functionalization::impl::replace_(self, tmp_output);
12506 at::functionalization::impl::commit_update(self);
12507 at::functionalization::impl::sync(self);
12508 return self;
12509 }
12510 }
12511
12512 at::Tensor & not_equal_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
12513 if (false) {
12514 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
12515       // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
12516       // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
12517 auto self_meta = to_meta(self);
12518 auto out_meta = to_meta(out);
12519 at::AutoDispatchSkipFunctionalize func_guard;
12520 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
12521 at::_ops::not_equal_Scalar_out::call(self_meta, other, out_meta);
12522 }
12523
12524 at::Tensor self_;
12525 if (at::functionalization::impl::isFunctionalTensor(self)) {
12526 at::functionalization::impl::sync(self);
12527 self_ = at::functionalization::impl::from_functional_tensor(self);
12528 } else {
12529 self_ = self;
12530 }
12531
12532 at::Tensor out_;
12533 if (at::functionalization::impl::isFunctionalTensor(out)) {
12534 at::functionalization::impl::sync(out);
12535 out_ = at::functionalization::impl::from_functional_tensor(out);
12536 } else {
12537 out_ = out;
12538 }
12539 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
12540 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
12541 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
12542 TORCH_INTERNAL_ASSERT(false,
12543 "mutating a non-functional tensor with a functional tensor is not allowed.",
12544 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
12545 } else {
12546 // case 2: arguments are not functional tensors, so we no-op and redispatch.
12547 at::AutoDispatchSkipFunctionalize guard;
12548 at::Tensor tmp_output = at::_ops::not_equal_Scalar_out::call(self_, other, out_);
12549         return out;
12550 }
12551 } else {
12552 at::Tensor tmp_output;
12553 {
12554 at::AutoDispatchSkipFunctionalize guard;
12555 tmp_output = at::_ops::not_equal_Scalar::call(self_, other);
12556 }
12557 at::functionalization::impl::replace_(out, tmp_output);
12558 at::functionalization::impl::commit_update(out);
12559 at::functionalization::impl::sync(out);
12560 return out;
12561 }
12562 }
12563
12564 at::Tensor & not_equal__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
12565 if (true) {
12566 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
12567       // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
12568       // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
12569 auto self_meta = to_meta(self);
12570 at::AutoDispatchSkipFunctionalize func_guard;
12571 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
12572 at::_ops::not_equal__Scalar::call(self_meta, other);
12573 }
12574
12575 at::Tensor self_;
12576 if (at::functionalization::impl::isFunctionalTensor(self)) {
12577 at::functionalization::impl::sync(self);
12578 self_ = at::functionalization::impl::from_functional_tensor(self);
12579 } else {
12580 self_ = self;
12581 }
12582 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
12583 if ((false)) {
12584 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
12585 TORCH_INTERNAL_ASSERT(false,
12586 "mutating a non-functional tensor with a functional tensor is not allowed.",
12587 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
12588 } else {
12589 // case 2: arguments are not functional tensors, so we no-op and redispatch.
12590 at::AutoDispatchSkipFunctionalize guard;
12591 at::Tensor tmp_output = at::_ops::not_equal__Scalar::call(self_, other);
12592         return self;
12593 }
12594 } else {
12595 at::Tensor tmp_output;
12596 {
12597 at::AutoDispatchSkipFunctionalize guard;
12598 tmp_output = at::_ops::not_equal_Scalar::call(self_, other);
12599 }
12600 at::functionalization::impl::replace_(self, tmp_output);
12601 at::functionalization::impl::commit_update(self);
12602 at::functionalization::impl::sync(self);
12603 return self;
12604 }
12605 }
12606
12607 at::Tensor & not_equal_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
12608 if (false) {
12609 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
12610       // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
12611       // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
12612 auto self_meta = to_meta(self);
12613 auto other_meta = to_meta(other);
12614 auto out_meta = to_meta(out);
12615 at::AutoDispatchSkipFunctionalize func_guard;
12616 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
12617 at::_ops::not_equal_Tensor_out::call(self_meta, other_meta, out_meta);
12618 }
12619
12620 at::Tensor self_;
12621 if (at::functionalization::impl::isFunctionalTensor(self)) {
12622 at::functionalization::impl::sync(self);
12623 self_ = at::functionalization::impl::from_functional_tensor(self);
12624 } else {
12625 self_ = self;
12626 }
12627
12628 at::Tensor other_;
12629 if (at::functionalization::impl::isFunctionalTensor(other)) {
12630 at::functionalization::impl::sync(other);
12631 other_ = at::functionalization::impl::from_functional_tensor(other);
12632 } else {
12633 other_ = other;
12634 }
12635
12636 at::Tensor out_;
12637 if (at::functionalization::impl::isFunctionalTensor(out)) {
12638 at::functionalization::impl::sync(out);
12639 out_ = at::functionalization::impl::from_functional_tensor(out);
12640 } else {
12641 out_ = out;
12642 }
12643 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
12644 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
12645 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
12646 TORCH_INTERNAL_ASSERT(false,
12647 "mutating a non-functional tensor with a functional tensor is not allowed.",
12648 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
12649 } else {
12650 // case 2: arguments are not functional tensors, so we no-op and redispatch.
12651 at::AutoDispatchSkipFunctionalize guard;
12652 at::Tensor tmp_output = at::_ops::not_equal_Tensor_out::call(self_, other_, out_);
12653         return out;
12654 }
12655 } else {
12656 at::Tensor tmp_output;
12657 {
12658 at::AutoDispatchSkipFunctionalize guard;
12659 tmp_output = at::_ops::not_equal_Tensor::call(self_, other_);
12660 }
12661 at::functionalization::impl::replace_(out, tmp_output);
12662 at::functionalization::impl::commit_update(out);
12663 at::functionalization::impl::sync(out);
12664 return out;
12665 }
12666 }
12667
12668 at::Tensor & not_equal__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
12669 if (true) {
12670 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
12671       // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
12672       // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
12673 auto self_meta = to_meta(self);
12674 auto other_meta = to_meta(other);
12675 at::AutoDispatchSkipFunctionalize func_guard;
12676 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
12677 at::_ops::not_equal__Tensor::call(self_meta, other_meta);
12678 }
12679
12680 at::Tensor self_;
12681 if (at::functionalization::impl::isFunctionalTensor(self)) {
12682 at::functionalization::impl::sync(self);
12683 self_ = at::functionalization::impl::from_functional_tensor(self);
12684 } else {
12685 self_ = self;
12686 }
12687
12688 at::Tensor other_;
12689 if (at::functionalization::impl::isFunctionalTensor(other)) {
12690 at::functionalization::impl::sync(other);
12691 other_ = at::functionalization::impl::from_functional_tensor(other);
12692 } else {
12693 other_ = other;
12694 }
12695 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
12696 if ((false || at::functionalization::impl::isFunctionalTensor(other))) {
12697 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
12698 TORCH_INTERNAL_ASSERT(false,
12699 "mutating a non-functional tensor with a functional tensor is not allowed.",
12700 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
12701 } else {
12702 // case 2: arguments are not functional tensors, so we no-op and redispatch.
12703 at::AutoDispatchSkipFunctionalize guard;
12704 at::Tensor tmp_output = at::_ops::not_equal__Tensor::call(self_, other_);
12705         return self;
12706 }
12707 } else {
12708 at::Tensor tmp_output;
12709 {
12710 at::AutoDispatchSkipFunctionalize guard;
12711 tmp_output = at::_ops::not_equal_Tensor::call(self_, other_);
12712 }
12713 at::functionalization::impl::replace_(self, tmp_output);
12714 at::functionalization::impl::commit_update(self);
12715 at::functionalization::impl::sync(self);
12716 return self;
12717 }
12718 }
12719
12720 at::Tensor & addcmul_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out) {
12721 if (false) {
12722 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
12723       // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
12724       // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
12725 auto self_meta = to_meta(self);
12726 auto tensor1_meta = to_meta(tensor1);
12727 auto tensor2_meta = to_meta(tensor2);
12728 auto out_meta = to_meta(out);
12729 at::AutoDispatchSkipFunctionalize func_guard;
12730 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
12731 at::_ops::addcmul_out::call(self_meta, tensor1_meta, tensor2_meta, value, out_meta);
12732 }
12733
12734 at::Tensor self_;
12735 if (at::functionalization::impl::isFunctionalTensor(self)) {
12736 at::functionalization::impl::sync(self);
12737 self_ = at::functionalization::impl::from_functional_tensor(self);
12738 } else {
12739 self_ = self;
12740 }
12741
12742 at::Tensor tensor1_;
12743 if (at::functionalization::impl::isFunctionalTensor(tensor1)) {
12744 at::functionalization::impl::sync(tensor1);
12745 tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1);
12746 } else {
12747 tensor1_ = tensor1;
12748 }
12749
12750 at::Tensor tensor2_;
12751 if (at::functionalization::impl::isFunctionalTensor(tensor2)) {
12752 at::functionalization::impl::sync(tensor2);
12753 tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2);
12754 } else {
12755 tensor2_ = tensor2;
12756 }
12757
12758 at::Tensor out_;
12759 if (at::functionalization::impl::isFunctionalTensor(out)) {
12760 at::functionalization::impl::sync(out);
12761 out_ = at::functionalization::impl::from_functional_tensor(out);
12762 } else {
12763 out_ = out;
12764 }
12765 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
12766 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2))) {
12767 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
12768 TORCH_INTERNAL_ASSERT(false,
12769 "mutating a non-functional tensor with a functional tensor is not allowed.",
12770 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
12771 } else {
12772 // case 2: arguments are not functional tensors, so we no-op and redispatch.
12773 at::AutoDispatchSkipFunctionalize guard;
12774 at::Tensor tmp_output = at::_ops::addcmul_out::call(self_, tensor1_, tensor2_, value, out_);
12775         return out;
12776 }
12777 } else {
12778 at::Tensor tmp_output;
12779 {
12780 at::AutoDispatchSkipFunctionalize guard;
12781 tmp_output = at::_ops::addcmul::call(self_, tensor1_, tensor2_, value);
12782 }
12783 at::functionalization::impl::replace_(out, tmp_output);
12784 at::functionalization::impl::commit_update(out);
12785 at::functionalization::impl::sync(out);
12786 return out;
12787 }
12788 }
12789
12790 at::Tensor & addcmul_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
12791 if (true) {
12792 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
12793       // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
12794       // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
12795 auto self_meta = to_meta(self);
12796 auto tensor1_meta = to_meta(tensor1);
12797 auto tensor2_meta = to_meta(tensor2);
12798 at::AutoDispatchSkipFunctionalize func_guard;
12799 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
12800 at::_ops::addcmul_::call(self_meta, tensor1_meta, tensor2_meta, value);
12801 }
12802
12803 at::Tensor self_;
12804 if (at::functionalization::impl::isFunctionalTensor(self)) {
12805 at::functionalization::impl::sync(self);
12806 self_ = at::functionalization::impl::from_functional_tensor(self);
12807 } else {
12808 self_ = self;
12809 }
12810
12811 at::Tensor tensor1_;
12812 if (at::functionalization::impl::isFunctionalTensor(tensor1)) {
12813 at::functionalization::impl::sync(tensor1);
12814 tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1);
12815 } else {
12816 tensor1_ = tensor1;
12817 }
12818
12819 at::Tensor tensor2_;
12820 if (at::functionalization::impl::isFunctionalTensor(tensor2)) {
12821 at::functionalization::impl::sync(tensor2);
12822 tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2);
12823 } else {
12824 tensor2_ = tensor2;
12825 }
12826 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
12827 if ((false || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2))) {
12828 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
12829 TORCH_INTERNAL_ASSERT(false,
12830 "mutating a non-functional tensor with a functional tensor is not allowed.",
12831 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
12832 } else {
12833 // case 2: arguments are not functional tensors, so we no-op and redispatch.
12834 at::AutoDispatchSkipFunctionalize guard;
12835 at::Tensor tmp_output = at::_ops::addcmul_::call(self_, tensor1_, tensor2_, value);
12836         return self;
12837 }
12838 } else {
12839 at::Tensor tmp_output;
12840 {
12841 at::AutoDispatchSkipFunctionalize guard;
12842 tmp_output = at::_ops::addcmul::call(self_, tensor1_, tensor2_, value);
12843 }
12844 at::functionalization::impl::replace_(self, tmp_output);
12845 at::functionalization::impl::commit_update(self);
12846 at::functionalization::impl::sync(self);
12847 return self;
12848 }
12849 }
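    // Only Tensor (and optional Tensor) arguments are unwrapped before redispatch; scalars such as
    // `value` in addcmul_ above are forwarded as-is. If `self` is not functional but `tensor1` or
    // `tensor2` is, the kernel trips the TORCH_INTERNAL_ASSERT in case 1 instead of silently mutating
    // a plain tensor from functional inputs.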
12850
12851 at::Tensor & ormqr_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose, at::Tensor & out) {
12852 if (false) {
12853 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
12854       // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
12855       // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
12856 auto self_meta = to_meta(self);
12857 auto input2_meta = to_meta(input2);
12858 auto input3_meta = to_meta(input3);
12859 auto out_meta = to_meta(out);
12860 at::AutoDispatchSkipFunctionalize func_guard;
12861 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
12862 at::_ops::ormqr_out::call(self_meta, input2_meta, input3_meta, left, transpose, out_meta);
12863 }
12864
12865 at::Tensor self_;
12866 if (at::functionalization::impl::isFunctionalTensor(self)) {
12867 at::functionalization::impl::sync(self);
12868 self_ = at::functionalization::impl::from_functional_tensor(self);
12869 } else {
12870 self_ = self;
12871 }
12872
12873 at::Tensor input2_;
12874 if (at::functionalization::impl::isFunctionalTensor(input2)) {
12875 at::functionalization::impl::sync(input2);
12876 input2_ = at::functionalization::impl::from_functional_tensor(input2);
12877 } else {
12878 input2_ = input2;
12879 }
12880
12881 at::Tensor input3_;
12882 if (at::functionalization::impl::isFunctionalTensor(input3)) {
12883 at::functionalization::impl::sync(input3);
12884 input3_ = at::functionalization::impl::from_functional_tensor(input3);
12885 } else {
12886 input3_ = input3;
12887 }
12888
12889 at::Tensor out_;
12890 if (at::functionalization::impl::isFunctionalTensor(out)) {
12891 at::functionalization::impl::sync(out);
12892 out_ = at::functionalization::impl::from_functional_tensor(out);
12893 } else {
12894 out_ = out;
12895 }
12896 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
12897 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(input2) || at::functionalization::impl::isFunctionalTensor(input3))) {
12898 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
12899 TORCH_INTERNAL_ASSERT(false,
12900 "mutating a non-functional tensor with a functional tensor is not allowed.",
12901 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
12902 } else {
12903 // case 2: arguments are not functional tensors, so we no-op and redispatch.
12904 at::AutoDispatchSkipFunctionalize guard;
12905 at::Tensor tmp_output = at::_ops::ormqr_out::call(self_, input2_, input3_, left, transpose, out_);
12906         return out;
12907 }
12908 } else {
12909 at::Tensor tmp_output;
12910 {
12911 at::AutoDispatchSkipFunctionalize guard;
12912 tmp_output = at::_ops::ormqr::call(self_, input2_, input3_, left, transpose);
12913 }
12914 at::functionalization::impl::replace_(out, tmp_output);
12915 at::functionalization::impl::commit_update(out);
12916 at::functionalization::impl::sync(out);
12917 return out;
12918 }
12919 }
12920
12921 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> lu_unpack_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots, at::Tensor & P, at::Tensor & L, at::Tensor & U) {
12922 if (false) {
12923 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
12924       // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
12925       // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
12926 auto LU_data_meta = to_meta(LU_data);
12927 auto LU_pivots_meta = to_meta(LU_pivots);
12928 auto P_meta = to_meta(P);
12929 auto L_meta = to_meta(L);
12930 auto U_meta = to_meta(U);
12931 at::AutoDispatchSkipFunctionalize func_guard;
12932 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
12933 at::_ops::lu_unpack_out::call(LU_data_meta, LU_pivots_meta, unpack_data, unpack_pivots, P_meta, L_meta, U_meta);
12934 }
12935
12936 at::Tensor LU_data_;
12937 if (at::functionalization::impl::isFunctionalTensor(LU_data)) {
12938 at::functionalization::impl::sync(LU_data);
12939 LU_data_ = at::functionalization::impl::from_functional_tensor(LU_data);
12940 } else {
12941 LU_data_ = LU_data;
12942 }
12943
12944 at::Tensor LU_pivots_;
12945 if (at::functionalization::impl::isFunctionalTensor(LU_pivots)) {
12946 at::functionalization::impl::sync(LU_pivots);
12947 LU_pivots_ = at::functionalization::impl::from_functional_tensor(LU_pivots);
12948 } else {
12949 LU_pivots_ = LU_pivots;
12950 }
12951
12952 at::Tensor P_;
12953 if (at::functionalization::impl::isFunctionalTensor(P)) {
12954 at::functionalization::impl::sync(P);
12955 P_ = at::functionalization::impl::from_functional_tensor(P);
12956 } else {
12957 P_ = P;
12958 }
12959
12960 at::Tensor L_;
12961 if (at::functionalization::impl::isFunctionalTensor(L)) {
12962 at::functionalization::impl::sync(L);
12963 L_ = at::functionalization::impl::from_functional_tensor(L);
12964 } else {
12965 L_ = L;
12966 }
12967
12968 at::Tensor U_;
12969 if (at::functionalization::impl::isFunctionalTensor(U)) {
12970 at::functionalization::impl::sync(U);
12971 U_ = at::functionalization::impl::from_functional_tensor(U);
12972 } else {
12973 U_ = U;
12974 }
12975 if (!(true && at::functionalization::impl::isFunctionalTensor(P) && at::functionalization::impl::isFunctionalTensor(L) && at::functionalization::impl::isFunctionalTensor(U))) {
12976 if ((false || at::functionalization::impl::isFunctionalTensor(LU_data) || at::functionalization::impl::isFunctionalTensor(LU_pivots))) {
12977 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
12978 TORCH_INTERNAL_ASSERT(false,
12979 "mutating a non-functional tensor with a functional tensor is not allowed.",
12980 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
12981 } else {
12982 // case 2: arguments are not functional tensors, so we no-op and redispatch.
12983 at::AutoDispatchSkipFunctionalize guard;
12984 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::lu_unpack_out::call(LU_data_, LU_pivots_, unpack_data, unpack_pivots, P_, L_, U_);
12985         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(P, L, U);
12986 }
12987 } else {
12988 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
12989 {
12990 at::AutoDispatchSkipFunctionalize guard;
12991 tmp_output = at::_ops::lu_unpack::call(LU_data_, LU_pivots_, unpack_data, unpack_pivots);
12992 }
12993 at::functionalization::impl::replace_(P, std::get<0>(tmp_output));
12994 at::functionalization::impl::commit_update(P);
12995 at::functionalization::impl::sync(P);
12996 at::functionalization::impl::replace_(L, std::get<1>(tmp_output));
12997 at::functionalization::impl::commit_update(L);
12998 at::functionalization::impl::sync(L);
12999 at::functionalization::impl::replace_(U, std::get<2>(tmp_output));
13000 at::functionalization::impl::commit_update(U);
13001 at::functionalization::impl::sync(U);
13002 return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(P, L, U);
13003 }
13004 }
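    // Ops with several out= tensors, like lu_unpack_out above, apply the same write-back once per
    // output (P, L and U each get replace_/commit_update/sync) before returning the tuple of tensor
    // references that the out= overload is declared to return.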
13005
13006 at::Tensor & dist_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & p, at::Tensor & out) {
13007 if (false) {
13008 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
13009       // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
13010       // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
13011 auto self_meta = to_meta(self);
13012 auto other_meta = to_meta(other);
13013 auto out_meta = to_meta(out);
13014 at::AutoDispatchSkipFunctionalize func_guard;
13015 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
13016 at::_ops::dist_out::call(self_meta, other_meta, p, out_meta);
13017 }
13018
13019 at::Tensor self_;
13020 if (at::functionalization::impl::isFunctionalTensor(self)) {
13021 at::functionalization::impl::sync(self);
13022 self_ = at::functionalization::impl::from_functional_tensor(self);
13023 } else {
13024 self_ = self;
13025 }
13026
13027 at::Tensor other_;
13028 if (at::functionalization::impl::isFunctionalTensor(other)) {
13029 at::functionalization::impl::sync(other);
13030 other_ = at::functionalization::impl::from_functional_tensor(other);
13031 } else {
13032 other_ = other;
13033 }
13034
13035 at::Tensor out_;
13036 if (at::functionalization::impl::isFunctionalTensor(out)) {
13037 at::functionalization::impl::sync(out);
13038 out_ = at::functionalization::impl::from_functional_tensor(out);
13039 } else {
13040 out_ = out;
13041 }
13042 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
13043 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
13044 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
13045 TORCH_INTERNAL_ASSERT(false,
13046 "mutating a non-functional tensor with a functional tensor is not allowed.",
13047 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
13048 } else {
13049 // case 2: arguments are not functional tensors, so we no-op and redispatch.
13050 at::AutoDispatchSkipFunctionalize guard;
13051 at::Tensor tmp_output = at::_ops::dist_out::call(self_, other_, p, out_);
13052         return out;
13053 }
13054 } else {
13055 at::Tensor tmp_output;
13056 {
13057 at::AutoDispatchSkipFunctionalize guard;
13058 tmp_output = at::_ops::dist::call(self_, other_, p);
13059 }
13060 at::functionalization::impl::replace_(out, tmp_output);
13061 at::functionalization::impl::commit_update(out);
13062 at::functionalization::impl::sync(out);
13063 return out;
13064 }
13065 }
13066
13067 at::Tensor & arctan2_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
13068 if (false) {
13069 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
13070       // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
13071       // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
13072 auto self_meta = to_meta(self);
13073 auto other_meta = to_meta(other);
13074 auto out_meta = to_meta(out);
13075 at::AutoDispatchSkipFunctionalize func_guard;
13076 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
13077 at::_ops::arctan2_out::call(self_meta, other_meta, out_meta);
13078 }
13079
13080 at::Tensor self_;
13081 if (at::functionalization::impl::isFunctionalTensor(self)) {
13082 at::functionalization::impl::sync(self);
13083 self_ = at::functionalization::impl::from_functional_tensor(self);
13084 } else {
13085 self_ = self;
13086 }
13087
13088 at::Tensor other_;
13089 if (at::functionalization::impl::isFunctionalTensor(other)) {
13090 at::functionalization::impl::sync(other);
13091 other_ = at::functionalization::impl::from_functional_tensor(other);
13092 } else {
13093 other_ = other;
13094 }
13095
13096 at::Tensor out_;
13097 if (at::functionalization::impl::isFunctionalTensor(out)) {
13098 at::functionalization::impl::sync(out);
13099 out_ = at::functionalization::impl::from_functional_tensor(out);
13100 } else {
13101 out_ = out;
13102 }
13103 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
13104 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
13105 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
13106 TORCH_INTERNAL_ASSERT(false,
13107 "mutating a non-functional tensor with a functional tensor is not allowed.",
13108 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
13109 } else {
13110 // case 2: arguments are not functional tensors, so we no-op and redispatch.
13111 at::AutoDispatchSkipFunctionalize guard;
13112 at::Tensor tmp_output = at::_ops::arctan2_out::call(self_, other_, out_);
13113         return out;
13114 }
13115 } else {
13116 at::Tensor tmp_output;
13117 {
13118 at::AutoDispatchSkipFunctionalize guard;
13119 tmp_output = at::_ops::arctan2::call(self_, other_);
13120 }
13121 at::functionalization::impl::replace_(out, tmp_output);
13122 at::functionalization::impl::commit_update(out);
13123 at::functionalization::impl::sync(out);
13124 return out;
13125 }
13126 }
13127
13128 at::Tensor & arctan2_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
13129 if (true) {
13130 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
13131       // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
13132       // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
13133 auto self_meta = to_meta(self);
13134 auto other_meta = to_meta(other);
13135 at::AutoDispatchSkipFunctionalize func_guard;
13136 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
13137 at::_ops::arctan2_::call(self_meta, other_meta);
13138 }
13139
13140 at::Tensor self_;
13141 if (at::functionalization::impl::isFunctionalTensor(self)) {
13142 at::functionalization::impl::sync(self);
13143 self_ = at::functionalization::impl::from_functional_tensor(self);
13144 } else {
13145 self_ = self;
13146 }
13147
13148 at::Tensor other_;
13149 if (at::functionalization::impl::isFunctionalTensor(other)) {
13150 at::functionalization::impl::sync(other);
13151 other_ = at::functionalization::impl::from_functional_tensor(other);
13152 } else {
13153 other_ = other;
13154 }
13155 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
13156 if ((false || at::functionalization::impl::isFunctionalTensor(other))) {
13157 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
13158 TORCH_INTERNAL_ASSERT(false,
13159 "mutating a non-functional tensor with a functional tensor is not allowed.",
13160 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
13161 } else {
13162 // case 2: arguments are not functional tensors, so we no-op and redispatch.
13163 at::AutoDispatchSkipFunctionalize guard;
13164 at::Tensor tmp_output = at::_ops::arctan2_::call(self_, other_);
13165         return self;
13166 }
13167 } else {
13168 at::Tensor tmp_output;
13169 {
13170 at::AutoDispatchSkipFunctionalize guard;
13171 tmp_output = at::_ops::arctan2::call(self_, other_);
13172 }
13173 at::functionalization::impl::replace_(self, tmp_output);
13174 at::functionalization::impl::commit_update(self);
13175 at::functionalization::impl::sync(self);
13176 return self;
13177 }
13178 }
13179
13180 at::Tensor & histc_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out) {
13181 if (false) {
13182 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
13183       // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
13184       // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
13185 auto self_meta = to_meta(self);
13186 auto out_meta = to_meta(out);
13187 at::AutoDispatchSkipFunctionalize func_guard;
13188 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
13189 at::_ops::histc_out::call(self_meta, bins, min, max, out_meta);
13190 }
13191
13192 at::Tensor self_;
13193 if (at::functionalization::impl::isFunctionalTensor(self)) {
13194 at::functionalization::impl::sync(self);
13195 self_ = at::functionalization::impl::from_functional_tensor(self);
13196 } else {
13197 self_ = self;
13198 }
13199
13200 at::Tensor out_;
13201 if (at::functionalization::impl::isFunctionalTensor(out)) {
13202 at::functionalization::impl::sync(out);
13203 out_ = at::functionalization::impl::from_functional_tensor(out);
13204 } else {
13205 out_ = out;
13206 }
13207 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
13208 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
13209 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
13210 TORCH_INTERNAL_ASSERT(false,
13211 "mutating a non-functional tensor with a functional tensor is not allowed.",
13212 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
13213 } else {
13214 // case 2: arguments are not functional tensors, so we no-op and redispatch.
13215 at::AutoDispatchSkipFunctionalize guard;
13216 at::Tensor tmp_output = at::_ops::histc_out::call(self_, bins, min, max, out_);
13217         return out;
13218 }
13219 } else {
13220 at::Tensor tmp_output;
13221 {
13222 at::AutoDispatchSkipFunctionalize guard;
13223 tmp_output = at::_ops::histc::call(self_, bins, min, max);
13224 }
13225 at::functionalization::impl::replace_(out, tmp_output);
13226 at::functionalization::impl::commit_update(out);
13227 at::functionalization::impl::sync(out);
13228 return out;
13229 }
13230 }
13231
13232 at::Tensor & _histogramdd_from_bin_cts_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & out) {
13233 if (false) {
13234 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
13235       // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
13236       // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
13237 auto self_meta = to_meta(self);
13238 auto weight_meta = to_meta(weight);
13239 auto out_meta = to_meta(out);
13240 at::AutoDispatchSkipFunctionalize func_guard;
13241 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
13242 at::_ops::_histogramdd_from_bin_cts_out::call(self_meta, bins, range, weight_meta, density, out_meta);
13243 }
13244
13245 at::Tensor self_;
13246 if (at::functionalization::impl::isFunctionalTensor(self)) {
13247 at::functionalization::impl::sync(self);
13248 self_ = at::functionalization::impl::from_functional_tensor(self);
13249 } else {
13250 self_ = self;
13251 }
13252
13253 c10::optional<at::Tensor> weight_;
13254 if (at::functionalization::impl::isFunctionalTensor(weight)) {
13255 at::functionalization::impl::sync(weight);
13256 weight_ = at::functionalization::impl::from_functional_tensor(weight);
13257 } else {
13258 weight_ = weight;
13259 }
13260
13261 at::Tensor out_;
13262 if (at::functionalization::impl::isFunctionalTensor(out)) {
13263 at::functionalization::impl::sync(out);
13264 out_ = at::functionalization::impl::from_functional_tensor(out);
13265 } else {
13266 out_ = out;
13267 }
13268 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
13269 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight))) {
13270 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
13271 TORCH_INTERNAL_ASSERT(false,
13272 "mutating a non-functional tensor with a functional tensor is not allowed.",
13273 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
13274 } else {
13275 // case 2: arguments are not functional tensors, so we no-op and redispatch.
13276 at::AutoDispatchSkipFunctionalize guard;
13277 at::Tensor tmp_output = at::_ops::_histogramdd_from_bin_cts_out::call(self_, bins, range, weight_, density, out_);
13278         return out;
13279 }
13280 } else {
13281 at::Tensor tmp_output;
13282 {
13283 at::AutoDispatchSkipFunctionalize guard;
13284 tmp_output = at::_ops::_histogramdd_from_bin_cts::call(self_, bins, range, weight_, density);
13285 }
13286 at::functionalization::impl::replace_(out, tmp_output);
13287 at::functionalization::impl::commit_update(out);
13288 at::functionalization::impl::sync(out);
13289 return out;
13290 }
13291 }
13292
13293 at::Tensor & hypot_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
13294 if (false) {
13295 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
13296       // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
13297       // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
13298 auto self_meta = to_meta(self);
13299 auto other_meta = to_meta(other);
13300 auto out_meta = to_meta(out);
13301 at::AutoDispatchSkipFunctionalize func_guard;
13302 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
13303 at::_ops::hypot_out::call(self_meta, other_meta, out_meta);
13304 }
13305
13306 at::Tensor self_;
13307 if (at::functionalization::impl::isFunctionalTensor(self)) {
13308 at::functionalization::impl::sync(self);
13309 self_ = at::functionalization::impl::from_functional_tensor(self);
13310 } else {
13311 self_ = self;
13312 }
13313
13314 at::Tensor other_;
13315 if (at::functionalization::impl::isFunctionalTensor(other)) {
13316 at::functionalization::impl::sync(other);
13317 other_ = at::functionalization::impl::from_functional_tensor(other);
13318 } else {
13319 other_ = other;
13320 }
13321
13322 at::Tensor out_;
13323 if (at::functionalization::impl::isFunctionalTensor(out)) {
13324 at::functionalization::impl::sync(out);
13325 out_ = at::functionalization::impl::from_functional_tensor(out);
13326 } else {
13327 out_ = out;
13328 }
13329 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
13330 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
13331 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
13332 TORCH_INTERNAL_ASSERT(false,
13333 "mutating a non-functional tensor with a functional tensor is not allowed.",
13334 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
13335 } else {
13336 // case 2: arguments are not functional tensors, so we no-op and redispatch.
13337 at::AutoDispatchSkipFunctionalize guard;
13338 at::Tensor tmp_output = at::_ops::hypot_out::call(self_, other_, out_);
13339         return out;
13340 }
13341 } else {
13342 at::Tensor tmp_output;
13343 {
13344 at::AutoDispatchSkipFunctionalize guard;
13345 tmp_output = at::_ops::hypot::call(self_, other_);
13346 }
13347 at::functionalization::impl::replace_(out, tmp_output);
13348 at::functionalization::impl::commit_update(out);
13349 at::functionalization::impl::sync(out);
13350 return out;
13351 }
13352 }
13353
13354 at::Tensor & hypot_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
13355 if (true) {
13356 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
13357       // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
13358       // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
13359 auto self_meta = to_meta(self);
13360 auto other_meta = to_meta(other);
13361 at::AutoDispatchSkipFunctionalize func_guard;
13362 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
13363 at::_ops::hypot_::call(self_meta, other_meta);
13364 }
13365
13366 at::Tensor self_;
13367 if (at::functionalization::impl::isFunctionalTensor(self)) {
13368 at::functionalization::impl::sync(self);
13369 self_ = at::functionalization::impl::from_functional_tensor(self);
13370 } else {
13371 self_ = self;
13372 }
13373
13374 at::Tensor other_;
13375 if (at::functionalization::impl::isFunctionalTensor(other)) {
13376 at::functionalization::impl::sync(other);
13377 other_ = at::functionalization::impl::from_functional_tensor(other);
13378 } else {
13379 other_ = other;
13380 }
13381 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
13382 if ((false || at::functionalization::impl::isFunctionalTensor(other))) {
13383 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
13384 TORCH_INTERNAL_ASSERT(false,
13385 "mutating a non-functional tensor with a functional tensor is not allowed.",
13386 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
13387 } else {
13388 // case 2: arguments are not functional tensors, so we no-op and redispatch.
13389 at::AutoDispatchSkipFunctionalize guard;
13390 at::Tensor tmp_output = at::_ops::hypot_::call(self_, other_);
          return self;
13392 }
13393 } else {
13394 at::Tensor tmp_output;
13395 {
13396 at::AutoDispatchSkipFunctionalize guard;
13397 tmp_output = at::_ops::hypot::call(self_, other_);
13398 }
13399 at::functionalization::impl::replace_(self, tmp_output);
13400 at::functionalization::impl::commit_update(self);
13401 at::functionalization::impl::sync(self);
13402 return self;
13403 }
13404 }
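
    // In-place kernels such as hypot_ run the meta-tensor pre-check above
    // unconditionally (the `if (true)` block), while the out= kernels compile it out
    // (`if (false)`): per the comment inside each kernel, only the in-place signatures
    // are assumed to be safely callable with meta tensors today.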
13405
13406 at::Tensor & min_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
13407 if (false) {
13408 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
13409 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
13411 auto self_meta = to_meta(self);
13412 auto other_meta = to_meta(other);
13413 auto out_meta = to_meta(out);
13414 at::AutoDispatchSkipFunctionalize func_guard;
13415 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
13416 at::_ops::min_out::call(self_meta, other_meta, out_meta);
13417 }
13418
13419 at::Tensor self_;
13420 if (at::functionalization::impl::isFunctionalTensor(self)) {
13421 at::functionalization::impl::sync(self);
13422 self_ = at::functionalization::impl::from_functional_tensor(self);
13423 } else {
13424 self_ = self;
13425 }
13426
13427 at::Tensor other_;
13428 if (at::functionalization::impl::isFunctionalTensor(other)) {
13429 at::functionalization::impl::sync(other);
13430 other_ = at::functionalization::impl::from_functional_tensor(other);
13431 } else {
13432 other_ = other;
13433 }
13434
13435 at::Tensor out_;
13436 if (at::functionalization::impl::isFunctionalTensor(out)) {
13437 at::functionalization::impl::sync(out);
13438 out_ = at::functionalization::impl::from_functional_tensor(out);
13439 } else {
13440 out_ = out;
13441 }
13442 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
13443 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
13444 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
13445 TORCH_INTERNAL_ASSERT(false,
13446 "mutating a non-functional tensor with a functional tensor is not allowed.",
13447 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
13448 } else {
13449 // case 2: arguments are not functional tensors, so we no-op and redispatch.
13450 at::AutoDispatchSkipFunctionalize guard;
13451 at::Tensor tmp_output = at::_ops::min_out::call(self_, other_, out_);
          return out;
13453 }
13454 } else {
13455 at::Tensor tmp_output;
13456 {
13457 at::AutoDispatchSkipFunctionalize guard;
13458 tmp_output = at::_ops::min_other::call(self_, other_);
13459 }
13460 at::functionalization::impl::replace_(out, tmp_output);
13461 at::functionalization::impl::commit_update(out);
13462 at::functionalization::impl::sync(out);
13463 return out;
13464 }
13465 }
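
    // The functional op used for the write-back path does not always share the out=
    // overload's name: min_out_out above computes its result with at::_ops::min_other
    // (the binary `min.other` overload), while its non-functional path still
    // redispatches to at::_ops::min_out.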
13466
13467 at::Tensor & msort_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
13468 if (false) {
13469 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
13470 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
13472 auto self_meta = to_meta(self);
13473 auto out_meta = to_meta(out);
13474 at::AutoDispatchSkipFunctionalize func_guard;
13475 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
13476 at::_ops::msort_out::call(self_meta, out_meta);
13477 }
13478
13479 at::Tensor self_;
13480 if (at::functionalization::impl::isFunctionalTensor(self)) {
13481 at::functionalization::impl::sync(self);
13482 self_ = at::functionalization::impl::from_functional_tensor(self);
13483 } else {
13484 self_ = self;
13485 }
13486
13487 at::Tensor out_;
13488 if (at::functionalization::impl::isFunctionalTensor(out)) {
13489 at::functionalization::impl::sync(out);
13490 out_ = at::functionalization::impl::from_functional_tensor(out);
13491 } else {
13492 out_ = out;
13493 }
13494 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
13495 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
13496 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
13497 TORCH_INTERNAL_ASSERT(false,
13498 "mutating a non-functional tensor with a functional tensor is not allowed.",
13499 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
13500 } else {
13501 // case 2: arguments are not functional tensors, so we no-op and redispatch.
13502 at::AutoDispatchSkipFunctionalize guard;
13503 at::Tensor tmp_output = at::_ops::msort_out::call(self_, out_);
          return out;
13505 }
13506 } else {
13507 at::Tensor tmp_output;
13508 {
13509 at::AutoDispatchSkipFunctionalize guard;
13510 tmp_output = at::_ops::msort::call(self_);
13511 }
13512 at::functionalization::impl::replace_(out, tmp_output);
13513 at::functionalization::impl::commit_update(out);
13514 at::functionalization::impl::sync(out);
13515 return out;
13516 }
13517 }
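
    // "case 1" in these kernels is the unsupported combination: a functional input
    // feeding a non-functional destination (`out` here, or `self` for in-place ops).
    // The mutation could not be recorded on the destination, so the kernel asserts
    // instead of silently dropping the update.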
13518
13519 void _foreach_add_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
13520 if (false) {
13521 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
13522 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
13524 auto self_meta = to_meta(self);
13525 auto out_meta = to_meta(out);
13526 at::AutoDispatchSkipFunctionalize func_guard;
13527 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
13528 at::_ops::_foreach_add_Scalar_out::call(self_meta, scalar, out_meta);
13529 }
13530
13531 ::std::vector<at::Tensor> self_;
13532 if (at::functionalization::impl::isFunctionalTensor(self)) {
13533 at::functionalization::impl::sync(self);
13534 self_ = at::functionalization::impl::from_functional_tensor(self);
13535 } else {
13536 self_ = self.vec();
13537 }
13538
13539 ::std::vector<at::Tensor> out_;
13540 if (at::functionalization::impl::isFunctionalTensor(out)) {
13541 at::functionalization::impl::sync(out);
13542 out_ = at::functionalization::impl::from_functional_tensor(out);
13543 } else {
13544 out_ = out.vec();
13545 }
13546 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
13547 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
13548 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
13549 TORCH_INTERNAL_ASSERT(false,
13550 "mutating a non-functional tensor with a functional tensor is not allowed.",
13551 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
13552 } else {
13553 // case 2: arguments are not functional tensors, so we no-op and redispatch.
13554 at::AutoDispatchSkipFunctionalize guard;
13555 at::_ops::_foreach_add_Scalar_out::call(self_, scalar, out_);
13557 }
13558 } else {
13559 ::std::vector<at::Tensor> tmp_output;
13560 {
13561 at::AutoDispatchSkipFunctionalize guard;
13562 tmp_output = at::_ops::_foreach_add_Scalar::call(self_, scalar);
13563 }
13564 at::functionalization::impl::replace_(out, tmp_output);
13565 at::functionalization::impl::commit_update(out);
13566 at::functionalization::impl::sync(out);
13567
13568 }
13569 }
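
    // The _foreach kernels operate on at::TensorList and return void. Unwrapping goes
    // through ::std::vector<at::Tensor> (via .vec() when the list is not functional),
    // and the write-back applies replace_()/commit_update()/sync() across the whole
    // output list.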
13570
13571 void _foreach_add__Scalar(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
13572 if (true) {
13573 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
13574 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
13576 auto self_meta = to_meta(self);
13577 at::AutoDispatchSkipFunctionalize func_guard;
13578 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
13579 at::_ops::_foreach_add__Scalar::call(self_meta, scalar);
13580 }
13581
13582 ::std::vector<at::Tensor> self_;
13583 if (at::functionalization::impl::isFunctionalTensor(self)) {
13584 at::functionalization::impl::sync(self);
13585 self_ = at::functionalization::impl::from_functional_tensor(self);
13586 } else {
13587 self_ = self.vec();
13588 }
13589 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
13590 if ((false)) {
13591 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
13592 TORCH_INTERNAL_ASSERT(false,
13593 "mutating a non-functional tensor with a functional tensor is not allowed.",
13594 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
13595 } else {
13596 // case 2: arguments are not functional tensors, so we no-op and redispatch.
13597 at::AutoDispatchSkipFunctionalize guard;
13598 at::_ops::_foreach_add__Scalar::call(self_, scalar);
13600 }
13601 } else {
13602 ::std::vector<at::Tensor> tmp_output;
13603 {
13604 at::AutoDispatchSkipFunctionalize guard;
13605 tmp_output = at::_ops::_foreach_add_Scalar::call(self_, scalar);
13606 }
13607 at::functionalization::impl::replace_(self, tmp_output);
13608 at::functionalization::impl::commit_update(self);
13609 at::functionalization::impl::sync(self);
13610
13611 }
13612 }
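
    // In effect, under functionalization an in-place foreach call such as
    // _foreach_add_(tensors, scalar) is carried out as
    //   tmp = _foreach_add(tensors, scalar);
    //   replace_(tensors, tmp); commit_update(tensors); sync(tensors);
    // so the mutation is replayed onto the functional wrappers of `tensors` rather than
    // performed in place at this dispatch level.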
13613
13614 void _foreach_clamp_min_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
13615 if (false) {
13616 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
13617 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
13619 auto self_meta = to_meta(self);
13620 auto out_meta = to_meta(out);
13621 at::AutoDispatchSkipFunctionalize func_guard;
13622 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
13623 at::_ops::_foreach_clamp_min_Scalar_out::call(self_meta, scalar, out_meta);
13624 }
13625
13626 ::std::vector<at::Tensor> self_;
13627 if (at::functionalization::impl::isFunctionalTensor(self)) {
13628 at::functionalization::impl::sync(self);
13629 self_ = at::functionalization::impl::from_functional_tensor(self);
13630 } else {
13631 self_ = self.vec();
13632 }
13633
13634 ::std::vector<at::Tensor> out_;
13635 if (at::functionalization::impl::isFunctionalTensor(out)) {
13636 at::functionalization::impl::sync(out);
13637 out_ = at::functionalization::impl::from_functional_tensor(out);
13638 } else {
13639 out_ = out.vec();
13640 }
13641 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
13642 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
13643 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
13644 TORCH_INTERNAL_ASSERT(false,
13645 "mutating a non-functional tensor with a functional tensor is not allowed.",
13646 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
13647 } else {
13648 // case 2: arguments are not functional tensors, so we no-op and redispatch.
13649 at::AutoDispatchSkipFunctionalize guard;
13650 at::_ops::_foreach_clamp_min_Scalar_out::call(self_, scalar, out_);
13652 }
13653 } else {
13654 ::std::vector<at::Tensor> tmp_output;
13655 {
13656 at::AutoDispatchSkipFunctionalize guard;
13657 tmp_output = at::_ops::_foreach_clamp_min_Scalar::call(self_, scalar);
13658 }
13659 at::functionalization::impl::replace_(out, tmp_output);
13660 at::functionalization::impl::commit_update(out);
13661 at::functionalization::impl::sync(out);
13662
13663 }
13664 }
13665
13666 void _foreach_clamp_min__Scalar(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
13667 if (true) {
13668 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
13669 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
13671 auto self_meta = to_meta(self);
13672 at::AutoDispatchSkipFunctionalize func_guard;
13673 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
13674 at::_ops::_foreach_clamp_min__Scalar::call(self_meta, scalar);
13675 }
13676
13677 ::std::vector<at::Tensor> self_;
13678 if (at::functionalization::impl::isFunctionalTensor(self)) {
13679 at::functionalization::impl::sync(self);
13680 self_ = at::functionalization::impl::from_functional_tensor(self);
13681 } else {
13682 self_ = self.vec();
13683 }
13684 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
13685 if ((false)) {
13686 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
13687 TORCH_INTERNAL_ASSERT(false,
13688 "mutating a non-functional tensor with a functional tensor is not allowed.",
13689 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
13690 } else {
13691 // case 2: arguments are not functional tensors, so we no-op and redispatch.
13692 at::AutoDispatchSkipFunctionalize guard;
13693 at::_ops::_foreach_clamp_min__Scalar::call(self_, scalar);
13695 }
13696 } else {
13697 ::std::vector<at::Tensor> tmp_output;
13698 {
13699 at::AutoDispatchSkipFunctionalize guard;
13700 tmp_output = at::_ops::_foreach_clamp_min_Scalar::call(self_, scalar);
13701 }
13702 at::functionalization::impl::replace_(self, tmp_output);
13703 at::functionalization::impl::commit_update(self);
13704 at::functionalization::impl::sync(self);
13705
13706 }
13707 }
13708
13709 void _foreach_add_out_List_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {
13710 if (false) {
13711 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
13712 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
13714 auto self_meta = to_meta(self);
13715 auto other_meta = to_meta(other);
13716 auto out_meta = to_meta(out);
13717 at::AutoDispatchSkipFunctionalize func_guard;
13718 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
13719 at::_ops::_foreach_add_List_out::call(self_meta, other_meta, alpha, out_meta);
13720 }
13721
13722 ::std::vector<at::Tensor> self_;
13723 if (at::functionalization::impl::isFunctionalTensor(self)) {
13724 at::functionalization::impl::sync(self);
13725 self_ = at::functionalization::impl::from_functional_tensor(self);
13726 } else {
13727 self_ = self.vec();
13728 }
13729
13730 ::std::vector<at::Tensor> other_;
13731 if (at::functionalization::impl::isFunctionalTensor(other)) {
13732 at::functionalization::impl::sync(other);
13733 other_ = at::functionalization::impl::from_functional_tensor(other);
13734 } else {
13735 other_ = other.vec();
13736 }
13737
13738 ::std::vector<at::Tensor> out_;
13739 if (at::functionalization::impl::isFunctionalTensor(out)) {
13740 at::functionalization::impl::sync(out);
13741 out_ = at::functionalization::impl::from_functional_tensor(out);
13742 } else {
13743 out_ = out.vec();
13744 }
13745 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
13746 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
13747 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
13748 TORCH_INTERNAL_ASSERT(false,
13749 "mutating a non-functional tensor with a functional tensor is not allowed.",
13750 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
13751 } else {
13752 // case 2: arguments are not functional tensors, so we no-op and redispatch.
13753 at::AutoDispatchSkipFunctionalize guard;
13754 at::_ops::_foreach_add_List_out::call(self_, other_, alpha, out_);
13756 }
13757 } else {
13758 ::std::vector<at::Tensor> tmp_output;
13759 {
13760 at::AutoDispatchSkipFunctionalize guard;
13761 tmp_output = at::_ops::_foreach_add_List::call(self_, other_, alpha);
13762 }
13763 at::functionalization::impl::replace_(out, tmp_output);
13764 at::functionalization::impl::commit_update(out);
13765 at::functionalization::impl::sync(out);
13766
13767 }
13768 }
13769
13770 void _foreach_add__List(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
13771 if (true) {
13772 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
13773 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
13775 auto self_meta = to_meta(self);
13776 auto other_meta = to_meta(other);
13777 at::AutoDispatchSkipFunctionalize func_guard;
13778 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
13779 at::_ops::_foreach_add__List::call(self_meta, other_meta, alpha);
13780 }
13781
13782 ::std::vector<at::Tensor> self_;
13783 if (at::functionalization::impl::isFunctionalTensor(self)) {
13784 at::functionalization::impl::sync(self);
13785 self_ = at::functionalization::impl::from_functional_tensor(self);
13786 } else {
13787 self_ = self.vec();
13788 }
13789
13790 ::std::vector<at::Tensor> other_;
13791 if (at::functionalization::impl::isFunctionalTensor(other)) {
13792 at::functionalization::impl::sync(other);
13793 other_ = at::functionalization::impl::from_functional_tensor(other);
13794 } else {
13795 other_ = other.vec();
13796 }
13797 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
13798 if ((false || at::functionalization::impl::isFunctionalTensor(other))) {
13799 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
13800 TORCH_INTERNAL_ASSERT(false,
13801 "mutating a non-functional tensor with a functional tensor is not allowed.",
13802 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
13803 } else {
13804 // case 2: arguments are not functional tensors, so we no-op and redispatch.
13805 at::AutoDispatchSkipFunctionalize guard;
13806 at::_ops::_foreach_add__List::call(self_, other_, alpha);
13808 }
13809 } else {
13810 ::std::vector<at::Tensor> tmp_output;
13811 {
13812 at::AutoDispatchSkipFunctionalize guard;
13813 tmp_output = at::_ops::_foreach_add_List::call(self_, other_, alpha);
13814 }
13815 at::functionalization::impl::replace_(self, tmp_output);
13816 at::functionalization::impl::commit_update(self);
13817 at::functionalization::impl::sync(self);
13818
13819 }
13820 }
13821
13822 void _foreach_clamp_min_out_List_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) {
13823 if (false) {
13824 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
13825 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
13827 auto self_meta = to_meta(self);
13828 auto other_meta = to_meta(other);
13829 auto out_meta = to_meta(out);
13830 at::AutoDispatchSkipFunctionalize func_guard;
13831 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
13832 at::_ops::_foreach_clamp_min_List_out::call(self_meta, other_meta, out_meta);
13833 }
13834
13835 ::std::vector<at::Tensor> self_;
13836 if (at::functionalization::impl::isFunctionalTensor(self)) {
13837 at::functionalization::impl::sync(self);
13838 self_ = at::functionalization::impl::from_functional_tensor(self);
13839 } else {
13840 self_ = self.vec();
13841 }
13842
13843 ::std::vector<at::Tensor> other_;
13844 if (at::functionalization::impl::isFunctionalTensor(other)) {
13845 at::functionalization::impl::sync(other);
13846 other_ = at::functionalization::impl::from_functional_tensor(other);
13847 } else {
13848 other_ = other.vec();
13849 }
13850
13851 ::std::vector<at::Tensor> out_;
13852 if (at::functionalization::impl::isFunctionalTensor(out)) {
13853 at::functionalization::impl::sync(out);
13854 out_ = at::functionalization::impl::from_functional_tensor(out);
13855 } else {
13856 out_ = out.vec();
13857 }
13858 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
13859 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
13860 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
13861 TORCH_INTERNAL_ASSERT(false,
13862 "mutating a non-functional tensor with a functional tensor is not allowed.",
13863 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
13864 } else {
13865 // case 2: arguments are not functional tensors, so we no-op and redispatch.
13866 at::AutoDispatchSkipFunctionalize guard;
13867 at::_ops::_foreach_clamp_min_List_out::call(self_, other_, out_);
13869 }
13870 } else {
13871 ::std::vector<at::Tensor> tmp_output;
13872 {
13873 at::AutoDispatchSkipFunctionalize guard;
13874 tmp_output = at::_ops::_foreach_clamp_min_List::call(self_, other_);
13875 }
13876 at::functionalization::impl::replace_(out, tmp_output);
13877 at::functionalization::impl::commit_update(out);
13878 at::functionalization::impl::sync(out);
13879
13880 }
13881 }
13882
13883 void _foreach_clamp_min__List(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
13884 if (true) {
13885 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
13886 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
13888 auto self_meta = to_meta(self);
13889 auto other_meta = to_meta(other);
13890 at::AutoDispatchSkipFunctionalize func_guard;
13891 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
13892 at::_ops::_foreach_clamp_min__List::call(self_meta, other_meta);
13893 }
13894
13895 ::std::vector<at::Tensor> self_;
13896 if (at::functionalization::impl::isFunctionalTensor(self)) {
13897 at::functionalization::impl::sync(self);
13898 self_ = at::functionalization::impl::from_functional_tensor(self);
13899 } else {
13900 self_ = self.vec();
13901 }
13902
13903 ::std::vector<at::Tensor> other_;
13904 if (at::functionalization::impl::isFunctionalTensor(other)) {
13905 at::functionalization::impl::sync(other);
13906 other_ = at::functionalization::impl::from_functional_tensor(other);
13907 } else {
13908 other_ = other.vec();
13909 }
13910 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
13911 if ((false || at::functionalization::impl::isFunctionalTensor(other))) {
13912 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
13913 TORCH_INTERNAL_ASSERT(false,
13914 "mutating a non-functional tensor with a functional tensor is not allowed.",
13915 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
13916 } else {
13917 // case 2: arguments are not functional tensors, so we no-op and redispatch.
13918 at::AutoDispatchSkipFunctionalize guard;
13919 at::_ops::_foreach_clamp_min__List::call(self_, other_);
13921 }
13922 } else {
13923 ::std::vector<at::Tensor> tmp_output;
13924 {
13925 at::AutoDispatchSkipFunctionalize guard;
13926 tmp_output = at::_ops::_foreach_clamp_min_List::call(self_, other_);
13927 }
13928 at::functionalization::impl::replace_(self, tmp_output);
13929 at::functionalization::impl::commit_update(self);
13930 at::functionalization::impl::sync(self);
13931
13932 }
13933 }
13934
13935 void _foreach_add_out_ScalarList_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
13936 if (false) {
13937 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
13938 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
13940 auto self_meta = to_meta(self);
13941 auto out_meta = to_meta(out);
13942 at::AutoDispatchSkipFunctionalize func_guard;
13943 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
13944 at::_ops::_foreach_add_ScalarList_out::call(self_meta, scalars, out_meta);
13945 }
13946
13947 ::std::vector<at::Tensor> self_;
13948 if (at::functionalization::impl::isFunctionalTensor(self)) {
13949 at::functionalization::impl::sync(self);
13950 self_ = at::functionalization::impl::from_functional_tensor(self);
13951 } else {
13952 self_ = self.vec();
13953 }
13954
13955 ::std::vector<at::Tensor> out_;
13956 if (at::functionalization::impl::isFunctionalTensor(out)) {
13957 at::functionalization::impl::sync(out);
13958 out_ = at::functionalization::impl::from_functional_tensor(out);
13959 } else {
13960 out_ = out.vec();
13961 }
13962 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
13963 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
13964 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
13965 TORCH_INTERNAL_ASSERT(false,
13966 "mutating a non-functional tensor with a functional tensor is not allowed.",
13967 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
13968 } else {
13969 // case 2: arguments are not functional tensors, so we no-op and redispatch.
13970 at::AutoDispatchSkipFunctionalize guard;
13971 at::_ops::_foreach_add_ScalarList_out::call(self_, scalars, out_);
13973 }
13974 } else {
13975 ::std::vector<at::Tensor> tmp_output;
13976 {
13977 at::AutoDispatchSkipFunctionalize guard;
13978 tmp_output = at::_ops::_foreach_add_ScalarList::call(self_, scalars);
13979 }
13980 at::functionalization::impl::replace_(out, tmp_output);
13981 at::functionalization::impl::commit_update(out);
13982 at::functionalization::impl::sync(out);
13983
13984 }
13985 }
13986
13987 void _foreach_add__ScalarList(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
13988 if (true) {
13989 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
13990 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
13992 auto self_meta = to_meta(self);
13993 at::AutoDispatchSkipFunctionalize func_guard;
13994 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
13995 at::_ops::_foreach_add__ScalarList::call(self_meta, scalars);
13996 }
13997
13998 ::std::vector<at::Tensor> self_;
13999 if (at::functionalization::impl::isFunctionalTensor(self)) {
14000 at::functionalization::impl::sync(self);
14001 self_ = at::functionalization::impl::from_functional_tensor(self);
14002 } else {
14003 self_ = self.vec();
14004 }
14005 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
14006 if ((false)) {
14007 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
14008 TORCH_INTERNAL_ASSERT(false,
14009 "mutating a non-functional tensor with a functional tensor is not allowed.",
14010 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
14011 } else {
14012 // case 2: arguments are not functional tensors, so we no-op and redispatch.
14013 at::AutoDispatchSkipFunctionalize guard;
14014 at::_ops::_foreach_add__ScalarList::call(self_, scalars);
14016 }
14017 } else {
14018 ::std::vector<at::Tensor> tmp_output;
14019 {
14020 at::AutoDispatchSkipFunctionalize guard;
14021 tmp_output = at::_ops::_foreach_add_ScalarList::call(self_, scalars);
14022 }
14023 at::functionalization::impl::replace_(self, tmp_output);
14024 at::functionalization::impl::commit_update(self);
14025 at::functionalization::impl::sync(self);
14026
14027 }
14028 }
14029
14030 void _foreach_clamp_min_out_ScalarList_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
14031 if (false) {
14032 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
14033 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
14035 auto self_meta = to_meta(self);
14036 auto out_meta = to_meta(out);
14037 at::AutoDispatchSkipFunctionalize func_guard;
14038 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
14039 at::_ops::_foreach_clamp_min_ScalarList_out::call(self_meta, scalars, out_meta);
14040 }
14041
14042 ::std::vector<at::Tensor> self_;
14043 if (at::functionalization::impl::isFunctionalTensor(self)) {
14044 at::functionalization::impl::sync(self);
14045 self_ = at::functionalization::impl::from_functional_tensor(self);
14046 } else {
14047 self_ = self.vec();
14048 }
14049
14050 ::std::vector<at::Tensor> out_;
14051 if (at::functionalization::impl::isFunctionalTensor(out)) {
14052 at::functionalization::impl::sync(out);
14053 out_ = at::functionalization::impl::from_functional_tensor(out);
14054 } else {
14055 out_ = out.vec();
14056 }
14057 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
14058 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
14059 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
14060 TORCH_INTERNAL_ASSERT(false,
14061 "mutating a non-functional tensor with a functional tensor is not allowed.",
14062 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
14063 } else {
14064 // case 2: arguments are not functional tensors, so we no-op and redispatch.
14065 at::AutoDispatchSkipFunctionalize guard;
14066 at::_ops::_foreach_clamp_min_ScalarList_out::call(self_, scalars, out_);
14068 }
14069 } else {
14070 ::std::vector<at::Tensor> tmp_output;
14071 {
14072 at::AutoDispatchSkipFunctionalize guard;
14073 tmp_output = at::_ops::_foreach_clamp_min_ScalarList::call(self_, scalars);
14074 }
14075 at::functionalization::impl::replace_(out, tmp_output);
14076 at::functionalization::impl::commit_update(out);
14077 at::functionalization::impl::sync(out);
14078
14079 }
14080 }
14081
14082 void _foreach_clamp_min__ScalarList(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
14083 if (true) {
14084 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
14085 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
14087 auto self_meta = to_meta(self);
14088 at::AutoDispatchSkipFunctionalize func_guard;
14089 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
14090 at::_ops::_foreach_clamp_min__ScalarList::call(self_meta, scalars);
14091 }
14092
14093 ::std::vector<at::Tensor> self_;
14094 if (at::functionalization::impl::isFunctionalTensor(self)) {
14095 at::functionalization::impl::sync(self);
14096 self_ = at::functionalization::impl::from_functional_tensor(self);
14097 } else {
14098 self_ = self.vec();
14099 }
14100 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
14101 if ((false)) {
14102 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
14103 TORCH_INTERNAL_ASSERT(false,
14104 "mutating a non-functional tensor with a functional tensor is not allowed.",
14105 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
14106 } else {
14107 // case 2: arguments are not functional tensors, so we no-op and redispatch.
14108 at::AutoDispatchSkipFunctionalize guard;
14109 at::_ops::_foreach_clamp_min__ScalarList::call(self_, scalars);
14111 }
14112 } else {
14113 ::std::vector<at::Tensor> tmp_output;
14114 {
14115 at::AutoDispatchSkipFunctionalize guard;
14116 tmp_output = at::_ops::_foreach_clamp_min_ScalarList::call(self_, scalars);
14117 }
14118 at::functionalization::impl::replace_(self, tmp_output);
14119 at::functionalization::impl::commit_update(self);
14120 at::functionalization::impl::sync(self);
14121
14122 }
14123 }
14124
14125 void _foreach_zero_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
14126 if (false) {
14127 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
14128 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
14130 auto self_meta = to_meta(self);
14131 auto out_meta = to_meta(out);
14132 at::AutoDispatchSkipFunctionalize func_guard;
14133 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
14134 at::_ops::_foreach_zero_out::call(self_meta, out_meta);
14135 }
14136
14137 ::std::vector<at::Tensor> self_;
14138 if (at::functionalization::impl::isFunctionalTensor(self)) {
14139 at::functionalization::impl::sync(self);
14140 self_ = at::functionalization::impl::from_functional_tensor(self);
14141 } else {
14142 self_ = self.vec();
14143 }
14144
14145 ::std::vector<at::Tensor> out_;
14146 if (at::functionalization::impl::isFunctionalTensor(out)) {
14147 at::functionalization::impl::sync(out);
14148 out_ = at::functionalization::impl::from_functional_tensor(out);
14149 } else {
14150 out_ = out.vec();
14151 }
14152 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
14153 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
14154 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
14155 TORCH_INTERNAL_ASSERT(false,
14156 "mutating a non-functional tensor with a functional tensor is not allowed.",
14157 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
14158 } else {
14159 // case 2: arguments are not functional tensors, so we no-op and redispatch.
14160 at::AutoDispatchSkipFunctionalize guard;
14161 at::_ops::_foreach_zero_out::call(self_, out_);
14163 }
14164 } else {
14165 ::std::vector<at::Tensor> tmp_output;
14166 {
14167 at::AutoDispatchSkipFunctionalize guard;
14168 tmp_output = at::_ops::_foreach_zero::call(self_);
14169 }
14170 at::functionalization::impl::replace_(out, tmp_output);
14171 at::functionalization::impl::commit_update(out);
14172 at::functionalization::impl::sync(out);
14173
14174 }
14175 }
14176
14177 void _foreach_zero_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
14178 if (true) {
14179 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
14180 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
14182 auto self_meta = to_meta(self);
14183 at::AutoDispatchSkipFunctionalize func_guard;
14184 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
14185 at::_ops::_foreach_zero_::call(self_meta);
14186 }
14187
14188 ::std::vector<at::Tensor> self_;
14189 if (at::functionalization::impl::isFunctionalTensor(self)) {
14190 at::functionalization::impl::sync(self);
14191 self_ = at::functionalization::impl::from_functional_tensor(self);
14192 } else {
14193 self_ = self.vec();
14194 }
14195 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
14196 if ((false)) {
14197 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
14198 TORCH_INTERNAL_ASSERT(false,
14199 "mutating a non-functional tensor with a functional tensor is not allowed.",
14200 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
14201 } else {
14202 // case 2: arguments are not functional tensors, so we no-op and redispatch.
14203 at::AutoDispatchSkipFunctionalize guard;
14204 at::_ops::_foreach_zero_::call(self_);
14206 }
14207 } else {
14208 ::std::vector<at::Tensor> tmp_output;
14209 {
14210 at::AutoDispatchSkipFunctionalize guard;
14211 tmp_output = at::_ops::_foreach_zero::call(self_);
14212 }
14213 at::functionalization::impl::replace_(self, tmp_output);
14214 at::functionalization::impl::commit_update(self);
14215 at::functionalization::impl::sync(self);
14216
14217 }
14218 }
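
    // The unary foreach kernels that follow (_foreach_asin, _foreach_ceil,
    // _foreach_cosh, _foreach_log1p, _foreach_log2, _foreach_round, ...) instantiate
    // these same two shapes, differing only in which at::_ops entry they call.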
14219
14220 void _foreach_asin_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
14221 if (false) {
14222 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
14223 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
14225 auto self_meta = to_meta(self);
14226 auto out_meta = to_meta(out);
14227 at::AutoDispatchSkipFunctionalize func_guard;
14228 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
14229 at::_ops::_foreach_asin_out::call(self_meta, out_meta);
14230 }
14231
14232 ::std::vector<at::Tensor> self_;
14233 if (at::functionalization::impl::isFunctionalTensor(self)) {
14234 at::functionalization::impl::sync(self);
14235 self_ = at::functionalization::impl::from_functional_tensor(self);
14236 } else {
14237 self_ = self.vec();
14238 }
14239
14240 ::std::vector<at::Tensor> out_;
14241 if (at::functionalization::impl::isFunctionalTensor(out)) {
14242 at::functionalization::impl::sync(out);
14243 out_ = at::functionalization::impl::from_functional_tensor(out);
14244 } else {
14245 out_ = out.vec();
14246 }
14247 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
14248 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
14249 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
14250 TORCH_INTERNAL_ASSERT(false,
14251 "mutating a non-functional tensor with a functional tensor is not allowed.",
14252 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
14253 } else {
14254 // case 2: arguments are not functional tensors, so we no-op and redispatch.
14255 at::AutoDispatchSkipFunctionalize guard;
14256 at::_ops::_foreach_asin_out::call(self_, out_);
14258 }
14259 } else {
14260 ::std::vector<at::Tensor> tmp_output;
14261 {
14262 at::AutoDispatchSkipFunctionalize guard;
14263 tmp_output = at::_ops::_foreach_asin::call(self_);
14264 }
14265 at::functionalization::impl::replace_(out, tmp_output);
14266 at::functionalization::impl::commit_update(out);
14267 at::functionalization::impl::sync(out);
14268
14269 }
14270 }
14271
14272 void _foreach_asin_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
14273 if (true) {
14274 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
14275 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
14277 auto self_meta = to_meta(self);
14278 at::AutoDispatchSkipFunctionalize func_guard;
14279 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
14280 at::_ops::_foreach_asin_::call(self_meta);
14281 }
14282
14283 ::std::vector<at::Tensor> self_;
14284 if (at::functionalization::impl::isFunctionalTensor(self)) {
14285 at::functionalization::impl::sync(self);
14286 self_ = at::functionalization::impl::from_functional_tensor(self);
14287 } else {
14288 self_ = self.vec();
14289 }
14290 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
14291 if ((false)) {
14292 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
14293 TORCH_INTERNAL_ASSERT(false,
14294 "mutating a non-functional tensor with a functional tensor is not allowed.",
14295 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
14296 } else {
14297 // case 2: arguments are not functional tensors, so we no-op and redispatch.
14298 at::AutoDispatchSkipFunctionalize guard;
14299 at::_ops::_foreach_asin_::call(self_);
14301 }
14302 } else {
14303 ::std::vector<at::Tensor> tmp_output;
14304 {
14305 at::AutoDispatchSkipFunctionalize guard;
14306 tmp_output = at::_ops::_foreach_asin::call(self_);
14307 }
14308 at::functionalization::impl::replace_(self, tmp_output);
14309 at::functionalization::impl::commit_update(self);
14310 at::functionalization::impl::sync(self);
14311
14312 }
14313 }
14314
14315 void _foreach_ceil_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
14316 if (false) {
14317 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
14318 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
14320 auto self_meta = to_meta(self);
14321 auto out_meta = to_meta(out);
14322 at::AutoDispatchSkipFunctionalize func_guard;
14323 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
14324 at::_ops::_foreach_ceil_out::call(self_meta, out_meta);
14325 }
14326
14327 ::std::vector<at::Tensor> self_;
14328 if (at::functionalization::impl::isFunctionalTensor(self)) {
14329 at::functionalization::impl::sync(self);
14330 self_ = at::functionalization::impl::from_functional_tensor(self);
14331 } else {
14332 self_ = self.vec();
14333 }
14334
14335 ::std::vector<at::Tensor> out_;
14336 if (at::functionalization::impl::isFunctionalTensor(out)) {
14337 at::functionalization::impl::sync(out);
14338 out_ = at::functionalization::impl::from_functional_tensor(out);
14339 } else {
14340 out_ = out.vec();
14341 }
14342 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
14343 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
14344 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
14345 TORCH_INTERNAL_ASSERT(false,
14346 "mutating a non-functional tensor with a functional tensor is not allowed.",
14347 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
14348 } else {
14349 // case 2: arguments are not functional tensors, so we no-op and redispatch.
14350 at::AutoDispatchSkipFunctionalize guard;
14351 at::_ops::_foreach_ceil_out::call(self_, out_);
14353 }
14354 } else {
14355 ::std::vector<at::Tensor> tmp_output;
14356 {
14357 at::AutoDispatchSkipFunctionalize guard;
14358 tmp_output = at::_ops::_foreach_ceil::call(self_);
14359 }
14360 at::functionalization::impl::replace_(out, tmp_output);
14361 at::functionalization::impl::commit_update(out);
14362 at::functionalization::impl::sync(out);
14363
14364 }
14365 }
14366
14367 void _foreach_ceil_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
14368 if (true) {
14369 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
14370 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
14372 auto self_meta = to_meta(self);
14373 at::AutoDispatchSkipFunctionalize func_guard;
14374 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
14375 at::_ops::_foreach_ceil_::call(self_meta);
14376 }
14377
14378 ::std::vector<at::Tensor> self_;
14379 if (at::functionalization::impl::isFunctionalTensor(self)) {
14380 at::functionalization::impl::sync(self);
14381 self_ = at::functionalization::impl::from_functional_tensor(self);
14382 } else {
14383 self_ = self.vec();
14384 }
14385 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
14386 if ((false)) {
14387 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
14388 TORCH_INTERNAL_ASSERT(false,
14389 "mutating a non-functional tensor with a functional tensor is not allowed.",
14390 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
14391 } else {
14392 // case 2: arguments are not functional tensors, so we no-op and redispatch.
14393 at::AutoDispatchSkipFunctionalize guard;
14394 at::_ops::_foreach_ceil_::call(self_);
14396 }
14397 } else {
14398 ::std::vector<at::Tensor> tmp_output;
14399 {
14400 at::AutoDispatchSkipFunctionalize guard;
14401 tmp_output = at::_ops::_foreach_ceil::call(self_);
14402 }
14403 at::functionalization::impl::replace_(self, tmp_output);
14404 at::functionalization::impl::commit_update(self);
14405 at::functionalization::impl::sync(self);
14406
14407 }
14408 }
14409
14410 void _foreach_cosh_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
14411 if (false) {
14412 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
14413 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
14415 auto self_meta = to_meta(self);
14416 auto out_meta = to_meta(out);
14417 at::AutoDispatchSkipFunctionalize func_guard;
14418 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
14419 at::_ops::_foreach_cosh_out::call(self_meta, out_meta);
14420 }
14421
14422 ::std::vector<at::Tensor> self_;
14423 if (at::functionalization::impl::isFunctionalTensor(self)) {
14424 at::functionalization::impl::sync(self);
14425 self_ = at::functionalization::impl::from_functional_tensor(self);
14426 } else {
14427 self_ = self.vec();
14428 }
14429
14430 ::std::vector<at::Tensor> out_;
14431 if (at::functionalization::impl::isFunctionalTensor(out)) {
14432 at::functionalization::impl::sync(out);
14433 out_ = at::functionalization::impl::from_functional_tensor(out);
14434 } else {
14435 out_ = out.vec();
14436 }
14437 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
14438 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
14439 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
14440 TORCH_INTERNAL_ASSERT(false,
14441 "mutating a non-functional tensor with a functional tensor is not allowed.",
14442 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
14443 } else {
14444 // case 2: arguments are not functional tensors, so we no-op and redispatch.
14445 at::AutoDispatchSkipFunctionalize guard;
14446 at::_ops::_foreach_cosh_out::call(self_, out_);
14448 }
14449 } else {
14450 ::std::vector<at::Tensor> tmp_output;
14451 {
14452 at::AutoDispatchSkipFunctionalize guard;
14453 tmp_output = at::_ops::_foreach_cosh::call(self_);
14454 }
14455 at::functionalization::impl::replace_(out, tmp_output);
14456 at::functionalization::impl::commit_update(out);
14457 at::functionalization::impl::sync(out);
14458
14459 }
14460 }
14461
14462 void _foreach_cosh_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
14463 if (true) {
14464 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
14465 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
14467 auto self_meta = to_meta(self);
14468 at::AutoDispatchSkipFunctionalize func_guard;
14469 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
14470 at::_ops::_foreach_cosh_::call(self_meta);
14471 }
14472
14473 ::std::vector<at::Tensor> self_;
14474 if (at::functionalization::impl::isFunctionalTensor(self)) {
14475 at::functionalization::impl::sync(self);
14476 self_ = at::functionalization::impl::from_functional_tensor(self);
14477 } else {
14478 self_ = self.vec();
14479 }
14480 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
14481 if ((false)) {
14482 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
14483 TORCH_INTERNAL_ASSERT(false,
14484 "mutating a non-functional tensor with a functional tensor is not allowed.",
14485 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
14486 } else {
14487 // case 2: arguments are not functional tensors, so we no-op and redispatch.
14488 at::AutoDispatchSkipFunctionalize guard;
14489 at::_ops::_foreach_cosh_::call(self_);
14491 }
14492 } else {
14493 ::std::vector<at::Tensor> tmp_output;
14494 {
14495 at::AutoDispatchSkipFunctionalize guard;
14496 tmp_output = at::_ops::_foreach_cosh::call(self_);
14497 }
14498 at::functionalization::impl::replace_(self, tmp_output);
14499 at::functionalization::impl::commit_update(self);
14500 at::functionalization::impl::sync(self);
14501
14502 }
14503 }
14504
14505 void _foreach_log1p_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
14506 if (false) {
14507 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
14508 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
14510 auto self_meta = to_meta(self);
14511 auto out_meta = to_meta(out);
14512 at::AutoDispatchSkipFunctionalize func_guard;
14513 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
14514 at::_ops::_foreach_log1p_out::call(self_meta, out_meta);
14515 }
14516
14517 ::std::vector<at::Tensor> self_;
14518 if (at::functionalization::impl::isFunctionalTensor(self)) {
14519 at::functionalization::impl::sync(self);
14520 self_ = at::functionalization::impl::from_functional_tensor(self);
14521 } else {
14522 self_ = self.vec();
14523 }
14524
14525 ::std::vector<at::Tensor> out_;
14526 if (at::functionalization::impl::isFunctionalTensor(out)) {
14527 at::functionalization::impl::sync(out);
14528 out_ = at::functionalization::impl::from_functional_tensor(out);
14529 } else {
14530 out_ = out.vec();
14531 }
14532 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
14533 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
14534 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
14535 TORCH_INTERNAL_ASSERT(false,
14536 "mutating a non-functional tensor with a functional tensor is not allowed.",
14537 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
14538 } else {
14539 // case 2: arguments are not functional tensors, so we no-op and redispatch.
14540 at::AutoDispatchSkipFunctionalize guard;
14541 at::_ops::_foreach_log1p_out::call(self_, out_);
14543 }
14544 } else {
14545 ::std::vector<at::Tensor> tmp_output;
14546 {
14547 at::AutoDispatchSkipFunctionalize guard;
14548 tmp_output = at::_ops::_foreach_log1p::call(self_);
14549 }
14550 at::functionalization::impl::replace_(out, tmp_output);
14551 at::functionalization::impl::commit_update(out);
14552 at::functionalization::impl::sync(out);
14553
14554 }
14555 }
14556
14557 void _foreach_log1p_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
14558 if (true) {
14559 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
14560 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
14562 auto self_meta = to_meta(self);
14563 at::AutoDispatchSkipFunctionalize func_guard;
14564 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
14565 at::_ops::_foreach_log1p_::call(self_meta);
14566 }
14567
14568 ::std::vector<at::Tensor> self_;
14569 if (at::functionalization::impl::isFunctionalTensor(self)) {
14570 at::functionalization::impl::sync(self);
14571 self_ = at::functionalization::impl::from_functional_tensor(self);
14572 } else {
14573 self_ = self.vec();
14574 }
14575 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
14576 if ((false)) {
14577 // case 1: trying to mutate a non functional tensor with a functional tensor is an error
14578 TORCH_INTERNAL_ASSERT(false,
14579 "mutating a non-functional tensor with a functional tensor is not allowed.",
14580 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
14581 } else {
14582 // case 2: arguments are not functional tensors, so we no-op and redispatch.
14583 at::AutoDispatchSkipFunctionalize guard;
14584 at::_ops::_foreach_log1p_::call(self_);
14586 }
14587 } else {
14588 ::std::vector<at::Tensor> tmp_output;
14589 {
14590 at::AutoDispatchSkipFunctionalize guard;
14591 tmp_output = at::_ops::_foreach_log1p::call(self_);
14592 }
14593 at::functionalization::impl::replace_(self, tmp_output);
14594 at::functionalization::impl::commit_update(self);
14595 at::functionalization::impl::sync(self);
14596
14597 }
14598 }
14599
14600 void _foreach_log2_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
14601 if (false) {
14602 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
14603 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
14605 auto self_meta = to_meta(self);
14606 auto out_meta = to_meta(out);
14607 at::AutoDispatchSkipFunctionalize func_guard;
14608 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
14609 at::_ops::_foreach_log2_out::call(self_meta, out_meta);
14610 }
14611
14612 ::std::vector<at::Tensor> self_;
14613 if (at::functionalization::impl::isFunctionalTensor(self)) {
14614 at::functionalization::impl::sync(self);
14615 self_ = at::functionalization::impl::from_functional_tensor(self);
14616 } else {
14617 self_ = self.vec();
14618 }
14619
14620 ::std::vector<at::Tensor> out_;
14621 if (at::functionalization::impl::isFunctionalTensor(out)) {
14622 at::functionalization::impl::sync(out);
14623 out_ = at::functionalization::impl::from_functional_tensor(out);
14624 } else {
14625 out_ = out.vec();
14626 }
14627 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
14628 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
14629          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
14630 TORCH_INTERNAL_ASSERT(false,
14631 "mutating a non-functional tensor with a functional tensor is not allowed.",
14632 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
14633 } else {
14634 // case 2: arguments are not functional tensors, so we no-op and redispatch.
14635 at::AutoDispatchSkipFunctionalize guard;
14636 at::_ops::_foreach_log2_out::call(self_, out_);
14638 }
14639 } else {
14640 ::std::vector<at::Tensor> tmp_output;
14641 {
14642 at::AutoDispatchSkipFunctionalize guard;
14643 tmp_output = at::_ops::_foreach_log2::call(self_);
14644 }
14645 at::functionalization::impl::replace_(out, tmp_output);
14646 at::functionalization::impl::commit_update(out);
14647 at::functionalization::impl::sync(out);
14648
14649 }
14650 }
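
// Note: the out= kernels such as _foreach_log2_out_out above treat `out` as the
// mutation target: `out` is synced and unwrapped like the inputs, and when it is a
// functional tensor the kernel runs the op's functional form (_foreach_log2 here)
// and commits the result into `out` via replace_ / commit_update / sync. The
// meta-tensor pre-run is disabled for out= ops (the `if (false)` block above),
// since only in-place ops are assumed to support meta tensors today.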
14651
14652 void _foreach_log2_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
14653 if (true) {
14654 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
14655 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
14656        // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
14657 auto self_meta = to_meta(self);
14658 at::AutoDispatchSkipFunctionalize func_guard;
14659 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
14660 at::_ops::_foreach_log2_::call(self_meta);
14661 }
14662
14663 ::std::vector<at::Tensor> self_;
14664 if (at::functionalization::impl::isFunctionalTensor(self)) {
14665 at::functionalization::impl::sync(self);
14666 self_ = at::functionalization::impl::from_functional_tensor(self);
14667 } else {
14668 self_ = self.vec();
14669 }
14670 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
14671 if ((false)) {
14672          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
14673 TORCH_INTERNAL_ASSERT(false,
14674 "mutating a non-functional tensor with a functional tensor is not allowed.",
14675 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
14676 } else {
14677 // case 2: arguments are not functional tensors, so we no-op and redispatch.
14678 at::AutoDispatchSkipFunctionalize guard;
14679 at::_ops::_foreach_log2_::call(self_);
14681 }
14682 } else {
14683 ::std::vector<at::Tensor> tmp_output;
14684 {
14685 at::AutoDispatchSkipFunctionalize guard;
14686 tmp_output = at::_ops::_foreach_log2::call(self_);
14687 }
14688 at::functionalization::impl::replace_(self, tmp_output);
14689 at::functionalization::impl::commit_update(self);
14690 at::functionalization::impl::sync(self);
14691
14692 }
14693 }
14694
14695 void _foreach_round_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
14696 if (false) {
14697 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
14698 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
14699        // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
14700 auto self_meta = to_meta(self);
14701 auto out_meta = to_meta(out);
14702 at::AutoDispatchSkipFunctionalize func_guard;
14703 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
14704 at::_ops::_foreach_round_out::call(self_meta, out_meta);
14705 }
14706
14707 ::std::vector<at::Tensor> self_;
14708 if (at::functionalization::impl::isFunctionalTensor(self)) {
14709 at::functionalization::impl::sync(self);
14710 self_ = at::functionalization::impl::from_functional_tensor(self);
14711 } else {
14712 self_ = self.vec();
14713 }
14714
14715 ::std::vector<at::Tensor> out_;
14716 if (at::functionalization::impl::isFunctionalTensor(out)) {
14717 at::functionalization::impl::sync(out);
14718 out_ = at::functionalization::impl::from_functional_tensor(out);
14719 } else {
14720 out_ = out.vec();
14721 }
14722 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
14723 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
14724          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
14725 TORCH_INTERNAL_ASSERT(false,
14726 "mutating a non-functional tensor with a functional tensor is not allowed.",
14727 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
14728 } else {
14729 // case 2: arguments are not functional tensors, so we no-op and redispatch.
14730 at::AutoDispatchSkipFunctionalize guard;
14731 at::_ops::_foreach_round_out::call(self_, out_);
14733 }
14734 } else {
14735 ::std::vector<at::Tensor> tmp_output;
14736 {
14737 at::AutoDispatchSkipFunctionalize guard;
14738 tmp_output = at::_ops::_foreach_round::call(self_);
14739 }
14740 at::functionalization::impl::replace_(out, tmp_output);
14741 at::functionalization::impl::commit_update(out);
14742 at::functionalization::impl::sync(out);
14743
14744 }
14745 }
14746
14747 void _foreach_round_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
14748 if (true) {
14749 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
14750 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
14751        // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
14752 auto self_meta = to_meta(self);
14753 at::AutoDispatchSkipFunctionalize func_guard;
14754 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
14755 at::_ops::_foreach_round_::call(self_meta);
14756 }
14757
14758 ::std::vector<at::Tensor> self_;
14759 if (at::functionalization::impl::isFunctionalTensor(self)) {
14760 at::functionalization::impl::sync(self);
14761 self_ = at::functionalization::impl::from_functional_tensor(self);
14762 } else {
14763 self_ = self.vec();
14764 }
14765 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
14766 if ((false)) {
14767          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
14768 TORCH_INTERNAL_ASSERT(false,
14769 "mutating a non-functional tensor with a functional tensor is not allowed.",
14770 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
14771 } else {
14772 // case 2: arguments are not functional tensors, so we no-op and redispatch.
14773 at::AutoDispatchSkipFunctionalize guard;
14774 at::_ops::_foreach_round_::call(self_);
14776 }
14777 } else {
14778 ::std::vector<at::Tensor> tmp_output;
14779 {
14780 at::AutoDispatchSkipFunctionalize guard;
14781 tmp_output = at::_ops::_foreach_round::call(self_);
14782 }
14783 at::functionalization::impl::replace_(self, tmp_output);
14784 at::functionalization::impl::commit_update(self);
14785 at::functionalization::impl::sync(self);
14786
14787 }
14788 }
14789
14790 void _foreach_addcdiv_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) {
14791 if (false) {
14792 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
14793 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
14794        // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
14795 auto self_meta = to_meta(self);
14796 auto tensor1_meta = to_meta(tensor1);
14797 auto tensor2_meta = to_meta(tensor2);
14798 auto out_meta = to_meta(out);
14799 at::AutoDispatchSkipFunctionalize func_guard;
14800 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
14801 at::_ops::_foreach_addcdiv_Scalar_out::call(self_meta, tensor1_meta, tensor2_meta, value, out_meta);
14802 }
14803
14804 ::std::vector<at::Tensor> self_;
14805 if (at::functionalization::impl::isFunctionalTensor(self)) {
14806 at::functionalization::impl::sync(self);
14807 self_ = at::functionalization::impl::from_functional_tensor(self);
14808 } else {
14809 self_ = self.vec();
14810 }
14811
14812 ::std::vector<at::Tensor> tensor1_;
14813 if (at::functionalization::impl::isFunctionalTensor(tensor1)) {
14814 at::functionalization::impl::sync(tensor1);
14815 tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1);
14816 } else {
14817 tensor1_ = tensor1.vec();
14818 }
14819
14820 ::std::vector<at::Tensor> tensor2_;
14821 if (at::functionalization::impl::isFunctionalTensor(tensor2)) {
14822 at::functionalization::impl::sync(tensor2);
14823 tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2);
14824 } else {
14825 tensor2_ = tensor2.vec();
14826 }
14827
14828 ::std::vector<at::Tensor> out_;
14829 if (at::functionalization::impl::isFunctionalTensor(out)) {
14830 at::functionalization::impl::sync(out);
14831 out_ = at::functionalization::impl::from_functional_tensor(out);
14832 } else {
14833 out_ = out.vec();
14834 }
14835 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
14836 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2))) {
14837          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
14838 TORCH_INTERNAL_ASSERT(false,
14839 "mutating a non-functional tensor with a functional tensor is not allowed.",
14840 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
14841 } else {
14842 // case 2: arguments are not functional tensors, so we no-op and redispatch.
14843 at::AutoDispatchSkipFunctionalize guard;
14844 at::_ops::_foreach_addcdiv_Scalar_out::call(self_, tensor1_, tensor2_, value, out_);
14846 }
14847 } else {
14848 ::std::vector<at::Tensor> tmp_output;
14849 {
14850 at::AutoDispatchSkipFunctionalize guard;
14851 tmp_output = at::_ops::_foreach_addcdiv_Scalar::call(self_, tensor1_, tensor2_, value);
14852 }
14853 at::functionalization::impl::replace_(out, tmp_output);
14854 at::functionalization::impl::commit_update(out);
14855 at::functionalization::impl::sync(out);
14856
14857 }
14858 }
14859
14860 void _foreach_addcdiv__Scalar(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
14861 if (true) {
14862 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
14863 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
14864        // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
14865 auto self_meta = to_meta(self);
14866 auto tensor1_meta = to_meta(tensor1);
14867 auto tensor2_meta = to_meta(tensor2);
14868 at::AutoDispatchSkipFunctionalize func_guard;
14869 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
14870 at::_ops::_foreach_addcdiv__Scalar::call(self_meta, tensor1_meta, tensor2_meta, value);
14871 }
14872
14873 ::std::vector<at::Tensor> self_;
14874 if (at::functionalization::impl::isFunctionalTensor(self)) {
14875 at::functionalization::impl::sync(self);
14876 self_ = at::functionalization::impl::from_functional_tensor(self);
14877 } else {
14878 self_ = self.vec();
14879 }
14880
14881 ::std::vector<at::Tensor> tensor1_;
14882 if (at::functionalization::impl::isFunctionalTensor(tensor1)) {
14883 at::functionalization::impl::sync(tensor1);
14884 tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1);
14885 } else {
14886 tensor1_ = tensor1.vec();
14887 }
14888
14889 ::std::vector<at::Tensor> tensor2_;
14890 if (at::functionalization::impl::isFunctionalTensor(tensor2)) {
14891 at::functionalization::impl::sync(tensor2);
14892 tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2);
14893 } else {
14894 tensor2_ = tensor2.vec();
14895 }
14896 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
14897 if ((false || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2))) {
14898          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
14899 TORCH_INTERNAL_ASSERT(false,
14900 "mutating a non-functional tensor with a functional tensor is not allowed.",
14901 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
14902 } else {
14903 // case 2: arguments are not functional tensors, so we no-op and redispatch.
14904 at::AutoDispatchSkipFunctionalize guard;
14905 at::_ops::_foreach_addcdiv__Scalar::call(self_, tensor1_, tensor2_, value);
14907 }
14908 } else {
14909 ::std::vector<at::Tensor> tmp_output;
14910 {
14911 at::AutoDispatchSkipFunctionalize guard;
14912 tmp_output = at::_ops::_foreach_addcdiv_Scalar::call(self_, tensor1_, tensor2_, value);
14913 }
14914 at::functionalization::impl::replace_(self, tmp_output);
14915 at::functionalization::impl::commit_update(self);
14916 at::functionalization::impl::sync(self);
14917
14918 }
14919 }
14920
14921 void _foreach_addcmul_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) {
14922 if (false) {
14923 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
14924 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
14925        // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
14926 auto self_meta = to_meta(self);
14927 auto tensor1_meta = to_meta(tensor1);
14928 auto tensor2_meta = to_meta(tensor2);
14929 auto out_meta = to_meta(out);
14930 at::AutoDispatchSkipFunctionalize func_guard;
14931 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
14932 at::_ops::_foreach_addcmul_Scalar_out::call(self_meta, tensor1_meta, tensor2_meta, value, out_meta);
14933 }
14934
14935 ::std::vector<at::Tensor> self_;
14936 if (at::functionalization::impl::isFunctionalTensor(self)) {
14937 at::functionalization::impl::sync(self);
14938 self_ = at::functionalization::impl::from_functional_tensor(self);
14939 } else {
14940 self_ = self.vec();
14941 }
14942
14943 ::std::vector<at::Tensor> tensor1_;
14944 if (at::functionalization::impl::isFunctionalTensor(tensor1)) {
14945 at::functionalization::impl::sync(tensor1);
14946 tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1);
14947 } else {
14948 tensor1_ = tensor1.vec();
14949 }
14950
14951 ::std::vector<at::Tensor> tensor2_;
14952 if (at::functionalization::impl::isFunctionalTensor(tensor2)) {
14953 at::functionalization::impl::sync(tensor2);
14954 tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2);
14955 } else {
14956 tensor2_ = tensor2.vec();
14957 }
14958
14959 ::std::vector<at::Tensor> out_;
14960 if (at::functionalization::impl::isFunctionalTensor(out)) {
14961 at::functionalization::impl::sync(out);
14962 out_ = at::functionalization::impl::from_functional_tensor(out);
14963 } else {
14964 out_ = out.vec();
14965 }
14966 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
14967 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2))) {
14968          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
14969 TORCH_INTERNAL_ASSERT(false,
14970 "mutating a non-functional tensor with a functional tensor is not allowed.",
14971 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
14972 } else {
14973 // case 2: arguments are not functional tensors, so we no-op and redispatch.
14974 at::AutoDispatchSkipFunctionalize guard;
14975 at::_ops::_foreach_addcmul_Scalar_out::call(self_, tensor1_, tensor2_, value, out_);
14977 }
14978 } else {
14979 ::std::vector<at::Tensor> tmp_output;
14980 {
14981 at::AutoDispatchSkipFunctionalize guard;
14982 tmp_output = at::_ops::_foreach_addcmul_Scalar::call(self_, tensor1_, tensor2_, value);
14983 }
14984 at::functionalization::impl::replace_(out, tmp_output);
14985 at::functionalization::impl::commit_update(out);
14986 at::functionalization::impl::sync(out);
14987
14988 }
14989 }
14990
14991 void _foreach_addcmul__Scalar(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
14992 if (true) {
14993 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
14994 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
14995        // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
14996 auto self_meta = to_meta(self);
14997 auto tensor1_meta = to_meta(tensor1);
14998 auto tensor2_meta = to_meta(tensor2);
14999 at::AutoDispatchSkipFunctionalize func_guard;
15000 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
15001 at::_ops::_foreach_addcmul__Scalar::call(self_meta, tensor1_meta, tensor2_meta, value);
15002 }
15003
15004 ::std::vector<at::Tensor> self_;
15005 if (at::functionalization::impl::isFunctionalTensor(self)) {
15006 at::functionalization::impl::sync(self);
15007 self_ = at::functionalization::impl::from_functional_tensor(self);
15008 } else {
15009 self_ = self.vec();
15010 }
15011
15012 ::std::vector<at::Tensor> tensor1_;
15013 if (at::functionalization::impl::isFunctionalTensor(tensor1)) {
15014 at::functionalization::impl::sync(tensor1);
15015 tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1);
15016 } else {
15017 tensor1_ = tensor1.vec();
15018 }
15019
15020 ::std::vector<at::Tensor> tensor2_;
15021 if (at::functionalization::impl::isFunctionalTensor(tensor2)) {
15022 at::functionalization::impl::sync(tensor2);
15023 tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2);
15024 } else {
15025 tensor2_ = tensor2.vec();
15026 }
15027 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
15028 if ((false || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2))) {
15029          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
15030 TORCH_INTERNAL_ASSERT(false,
15031 "mutating a non-functional tensor with a functional tensor is not allowed.",
15032 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
15033 } else {
15034 // case 2: arguments are not functional tensors, so we no-op and redispatch.
15035 at::AutoDispatchSkipFunctionalize guard;
15036 at::_ops::_foreach_addcmul__Scalar::call(self_, tensor1_, tensor2_, value);
15038 }
15039 } else {
15040 ::std::vector<at::Tensor> tmp_output;
15041 {
15042 at::AutoDispatchSkipFunctionalize guard;
15043 tmp_output = at::_ops::_foreach_addcmul_Scalar::call(self_, tensor1_, tensor2_, value);
15044 }
15045 at::functionalization::impl::replace_(self, tmp_output);
15046 at::functionalization::impl::commit_update(self);
15047 at::functionalization::impl::sync(self);
15048
15049 }
15050 }
15051
15052 void _foreach_addcdiv_out_ScalarList_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
15053 if (false) {
15054 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
15055 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
15056        // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
15057 auto self_meta = to_meta(self);
15058 auto tensor1_meta = to_meta(tensor1);
15059 auto tensor2_meta = to_meta(tensor2);
15060 auto out_meta = to_meta(out);
15061 at::AutoDispatchSkipFunctionalize func_guard;
15062 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
15063 at::_ops::_foreach_addcdiv_ScalarList_out::call(self_meta, tensor1_meta, tensor2_meta, scalars, out_meta);
15064 }
15065
15066 ::std::vector<at::Tensor> self_;
15067 if (at::functionalization::impl::isFunctionalTensor(self)) {
15068 at::functionalization::impl::sync(self);
15069 self_ = at::functionalization::impl::from_functional_tensor(self);
15070 } else {
15071 self_ = self.vec();
15072 }
15073
15074 ::std::vector<at::Tensor> tensor1_;
15075 if (at::functionalization::impl::isFunctionalTensor(tensor1)) {
15076 at::functionalization::impl::sync(tensor1);
15077 tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1);
15078 } else {
15079 tensor1_ = tensor1.vec();
15080 }
15081
15082 ::std::vector<at::Tensor> tensor2_;
15083 if (at::functionalization::impl::isFunctionalTensor(tensor2)) {
15084 at::functionalization::impl::sync(tensor2);
15085 tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2);
15086 } else {
15087 tensor2_ = tensor2.vec();
15088 }
15089
15090 ::std::vector<at::Tensor> out_;
15091 if (at::functionalization::impl::isFunctionalTensor(out)) {
15092 at::functionalization::impl::sync(out);
15093 out_ = at::functionalization::impl::from_functional_tensor(out);
15094 } else {
15095 out_ = out.vec();
15096 }
15097 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
15098 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2))) {
15099          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
15100 TORCH_INTERNAL_ASSERT(false,
15101 "mutating a non-functional tensor with a functional tensor is not allowed.",
15102 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
15103 } else {
15104 // case 2: arguments are not functional tensors, so we no-op and redispatch.
15105 at::AutoDispatchSkipFunctionalize guard;
15106 at::_ops::_foreach_addcdiv_ScalarList_out::call(self_, tensor1_, tensor2_, scalars, out_);
15108 }
15109 } else {
15110 ::std::vector<at::Tensor> tmp_output;
15111 {
15112 at::AutoDispatchSkipFunctionalize guard;
15113 tmp_output = at::_ops::_foreach_addcdiv_ScalarList::call(self_, tensor1_, tensor2_, scalars);
15114 }
15115 at::functionalization::impl::replace_(out, tmp_output);
15116 at::functionalization::impl::commit_update(out);
15117 at::functionalization::impl::sync(out);
15118
15119 }
15120 }
15121
15122 void _foreach_addcdiv__ScalarList(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
15123 if (true) {
15124 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
15125 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
15126        // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
15127 auto self_meta = to_meta(self);
15128 auto tensor1_meta = to_meta(tensor1);
15129 auto tensor2_meta = to_meta(tensor2);
15130 at::AutoDispatchSkipFunctionalize func_guard;
15131 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
15132 at::_ops::_foreach_addcdiv__ScalarList::call(self_meta, tensor1_meta, tensor2_meta, scalars);
15133 }
15134
15135 ::std::vector<at::Tensor> self_;
15136 if (at::functionalization::impl::isFunctionalTensor(self)) {
15137 at::functionalization::impl::sync(self);
15138 self_ = at::functionalization::impl::from_functional_tensor(self);
15139 } else {
15140 self_ = self.vec();
15141 }
15142
15143 ::std::vector<at::Tensor> tensor1_;
15144 if (at::functionalization::impl::isFunctionalTensor(tensor1)) {
15145 at::functionalization::impl::sync(tensor1);
15146 tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1);
15147 } else {
15148 tensor1_ = tensor1.vec();
15149 }
15150
15151 ::std::vector<at::Tensor> tensor2_;
15152 if (at::functionalization::impl::isFunctionalTensor(tensor2)) {
15153 at::functionalization::impl::sync(tensor2);
15154 tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2);
15155 } else {
15156 tensor2_ = tensor2.vec();
15157 }
15158 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
15159 if ((false || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2))) {
15160          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
15161 TORCH_INTERNAL_ASSERT(false,
15162 "mutating a non-functional tensor with a functional tensor is not allowed.",
15163 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
15164 } else {
15165 // case 2: arguments are not functional tensors, so we no-op and redispatch.
15166 at::AutoDispatchSkipFunctionalize guard;
15167 at::_ops::_foreach_addcdiv__ScalarList::call(self_, tensor1_, tensor2_, scalars);
15169 }
15170 } else {
15171 ::std::vector<at::Tensor> tmp_output;
15172 {
15173 at::AutoDispatchSkipFunctionalize guard;
15174 tmp_output = at::_ops::_foreach_addcdiv_ScalarList::call(self_, tensor1_, tensor2_, scalars);
15175 }
15176 at::functionalization::impl::replace_(self, tmp_output);
15177 at::functionalization::impl::commit_update(self);
15178 at::functionalization::impl::sync(self);
15179
15180 }
15181 }
15182
15183 void _foreach_addcdiv_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) {
15184 if (false) {
15185 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
15186 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
15187        // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
15188 auto self_meta = to_meta(self);
15189 auto tensor1_meta = to_meta(tensor1);
15190 auto tensor2_meta = to_meta(tensor2);
15191 auto scalars_meta = to_meta(scalars);
15192 auto out_meta = to_meta(out);
15193 at::AutoDispatchSkipFunctionalize func_guard;
15194 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
15195 at::_ops::_foreach_addcdiv_Tensor_out::call(self_meta, tensor1_meta, tensor2_meta, scalars_meta, out_meta);
15196 }
15197
15198 ::std::vector<at::Tensor> self_;
15199 if (at::functionalization::impl::isFunctionalTensor(self)) {
15200 at::functionalization::impl::sync(self);
15201 self_ = at::functionalization::impl::from_functional_tensor(self);
15202 } else {
15203 self_ = self.vec();
15204 }
15205
15206 ::std::vector<at::Tensor> tensor1_;
15207 if (at::functionalization::impl::isFunctionalTensor(tensor1)) {
15208 at::functionalization::impl::sync(tensor1);
15209 tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1);
15210 } else {
15211 tensor1_ = tensor1.vec();
15212 }
15213
15214 ::std::vector<at::Tensor> tensor2_;
15215 if (at::functionalization::impl::isFunctionalTensor(tensor2)) {
15216 at::functionalization::impl::sync(tensor2);
15217 tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2);
15218 } else {
15219 tensor2_ = tensor2.vec();
15220 }
15221
15222 at::Tensor scalars_;
15223 if (at::functionalization::impl::isFunctionalTensor(scalars)) {
15224 at::functionalization::impl::sync(scalars);
15225 scalars_ = at::functionalization::impl::from_functional_tensor(scalars);
15226 } else {
15227 scalars_ = scalars;
15228 }
15229
15230 ::std::vector<at::Tensor> out_;
15231 if (at::functionalization::impl::isFunctionalTensor(out)) {
15232 at::functionalization::impl::sync(out);
15233 out_ = at::functionalization::impl::from_functional_tensor(out);
15234 } else {
15235 out_ = out.vec();
15236 }
15237 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
15238 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2) || at::functionalization::impl::isFunctionalTensor(scalars))) {
15239          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
15240 TORCH_INTERNAL_ASSERT(false,
15241 "mutating a non-functional tensor with a functional tensor is not allowed.",
15242 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
15243 } else {
15244 // case 2: arguments are not functional tensors, so we no-op and redispatch.
15245 at::AutoDispatchSkipFunctionalize guard;
15246 at::_ops::_foreach_addcdiv_Tensor_out::call(self_, tensor1_, tensor2_, scalars_, out_);
15248 }
15249 } else {
15250 ::std::vector<at::Tensor> tmp_output;
15251 {
15252 at::AutoDispatchSkipFunctionalize guard;
15253 tmp_output = at::_ops::_foreach_addcdiv_Tensor::call(self_, tensor1_, tensor2_, scalars_);
15254 }
15255 at::functionalization::impl::replace_(out, tmp_output);
15256 at::functionalization::impl::commit_update(out);
15257 at::functionalization::impl::sync(out);
15258
15259 }
15260 }
15261
15262 void _foreach_addcdiv__Tensor(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
15263 if (true) {
15264 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
15265 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
15266        // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
15267 auto self_meta = to_meta(self);
15268 auto tensor1_meta = to_meta(tensor1);
15269 auto tensor2_meta = to_meta(tensor2);
15270 auto scalars_meta = to_meta(scalars);
15271 at::AutoDispatchSkipFunctionalize func_guard;
15272 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
15273 at::_ops::_foreach_addcdiv__Tensor::call(self_meta, tensor1_meta, tensor2_meta, scalars_meta);
15274 }
15275
15276 ::std::vector<at::Tensor> self_;
15277 if (at::functionalization::impl::isFunctionalTensor(self)) {
15278 at::functionalization::impl::sync(self);
15279 self_ = at::functionalization::impl::from_functional_tensor(self);
15280 } else {
15281 self_ = self.vec();
15282 }
15283
15284 ::std::vector<at::Tensor> tensor1_;
15285 if (at::functionalization::impl::isFunctionalTensor(tensor1)) {
15286 at::functionalization::impl::sync(tensor1);
15287 tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1);
15288 } else {
15289 tensor1_ = tensor1.vec();
15290 }
15291
15292 ::std::vector<at::Tensor> tensor2_;
15293 if (at::functionalization::impl::isFunctionalTensor(tensor2)) {
15294 at::functionalization::impl::sync(tensor2);
15295 tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2);
15296 } else {
15297 tensor2_ = tensor2.vec();
15298 }
15299
15300 at::Tensor scalars_;
15301 if (at::functionalization::impl::isFunctionalTensor(scalars)) {
15302 at::functionalization::impl::sync(scalars);
15303 scalars_ = at::functionalization::impl::from_functional_tensor(scalars);
15304 } else {
15305 scalars_ = scalars;
15306 }
15307 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
15308 if ((false || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2) || at::functionalization::impl::isFunctionalTensor(scalars))) {
15309          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
15310 TORCH_INTERNAL_ASSERT(false,
15311 "mutating a non-functional tensor with a functional tensor is not allowed.",
15312 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
15313 } else {
15314 // case 2: arguments are not functional tensors, so we no-op and redispatch.
15315 at::AutoDispatchSkipFunctionalize guard;
15316 at::_ops::_foreach_addcdiv__Tensor::call(self_, tensor1_, tensor2_, scalars_);
15318 }
15319 } else {
15320 ::std::vector<at::Tensor> tmp_output;
15321 {
15322 at::AutoDispatchSkipFunctionalize guard;
15323 tmp_output = at::_ops::_foreach_addcdiv_Tensor::call(self_, tensor1_, tensor2_, scalars_);
15324 }
15325 at::functionalization::impl::replace_(self, tmp_output);
15326 at::functionalization::impl::commit_update(self);
15327 at::functionalization::impl::sync(self);
15328
15329 }
15330 }
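
// Note: for the Tensor-valued `scalars` overloads (_foreach_addcdiv_Tensor above,
// and the analogous _foreach_addcmul overloads below), `scalars` is unwrapped as a
// single at::Tensor rather than a TensorList, and it participates in the
// "functional input mutating a non-functional target" check alongside tensor1 and
// tensor2.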
15331
15332 void _foreach_addcmul_out_ScalarList_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
15333 if (false) {
15334 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
15335 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
15336        // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
15337 auto self_meta = to_meta(self);
15338 auto tensor1_meta = to_meta(tensor1);
15339 auto tensor2_meta = to_meta(tensor2);
15340 auto out_meta = to_meta(out);
15341 at::AutoDispatchSkipFunctionalize func_guard;
15342 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
15343 at::_ops::_foreach_addcmul_ScalarList_out::call(self_meta, tensor1_meta, tensor2_meta, scalars, out_meta);
15344 }
15345
15346 ::std::vector<at::Tensor> self_;
15347 if (at::functionalization::impl::isFunctionalTensor(self)) {
15348 at::functionalization::impl::sync(self);
15349 self_ = at::functionalization::impl::from_functional_tensor(self);
15350 } else {
15351 self_ = self.vec();
15352 }
15353
15354 ::std::vector<at::Tensor> tensor1_;
15355 if (at::functionalization::impl::isFunctionalTensor(tensor1)) {
15356 at::functionalization::impl::sync(tensor1);
15357 tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1);
15358 } else {
15359 tensor1_ = tensor1.vec();
15360 }
15361
15362 ::std::vector<at::Tensor> tensor2_;
15363 if (at::functionalization::impl::isFunctionalTensor(tensor2)) {
15364 at::functionalization::impl::sync(tensor2);
15365 tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2);
15366 } else {
15367 tensor2_ = tensor2.vec();
15368 }
15369
15370 ::std::vector<at::Tensor> out_;
15371 if (at::functionalization::impl::isFunctionalTensor(out)) {
15372 at::functionalization::impl::sync(out);
15373 out_ = at::functionalization::impl::from_functional_tensor(out);
15374 } else {
15375 out_ = out.vec();
15376 }
15377 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
15378 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2))) {
15379          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
15380 TORCH_INTERNAL_ASSERT(false,
15381 "mutating a non-functional tensor with a functional tensor is not allowed.",
15382 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
15383 } else {
15384 // case 2: arguments are not functional tensors, so we no-op and redispatch.
15385 at::AutoDispatchSkipFunctionalize guard;
15386 at::_ops::_foreach_addcmul_ScalarList_out::call(self_, tensor1_, tensor2_, scalars, out_);
15388 }
15389 } else {
15390 ::std::vector<at::Tensor> tmp_output;
15391 {
15392 at::AutoDispatchSkipFunctionalize guard;
15393 tmp_output = at::_ops::_foreach_addcmul_ScalarList::call(self_, tensor1_, tensor2_, scalars);
15394 }
15395 at::functionalization::impl::replace_(out, tmp_output);
15396 at::functionalization::impl::commit_update(out);
15397 at::functionalization::impl::sync(out);
15398
15399 }
15400 }
15401
15402 void _foreach_addcmul__ScalarList(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
15403 if (true) {
15404 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
15405 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
15406        // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
15407 auto self_meta = to_meta(self);
15408 auto tensor1_meta = to_meta(tensor1);
15409 auto tensor2_meta = to_meta(tensor2);
15410 at::AutoDispatchSkipFunctionalize func_guard;
15411 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
15412 at::_ops::_foreach_addcmul__ScalarList::call(self_meta, tensor1_meta, tensor2_meta, scalars);
15413 }
15414
15415 ::std::vector<at::Tensor> self_;
15416 if (at::functionalization::impl::isFunctionalTensor(self)) {
15417 at::functionalization::impl::sync(self);
15418 self_ = at::functionalization::impl::from_functional_tensor(self);
15419 } else {
15420 self_ = self.vec();
15421 }
15422
15423 ::std::vector<at::Tensor> tensor1_;
15424 if (at::functionalization::impl::isFunctionalTensor(tensor1)) {
15425 at::functionalization::impl::sync(tensor1);
15426 tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1);
15427 } else {
15428 tensor1_ = tensor1.vec();
15429 }
15430
15431 ::std::vector<at::Tensor> tensor2_;
15432 if (at::functionalization::impl::isFunctionalTensor(tensor2)) {
15433 at::functionalization::impl::sync(tensor2);
15434 tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2);
15435 } else {
15436 tensor2_ = tensor2.vec();
15437 }
15438 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
15439 if ((false || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2))) {
15440          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
15441 TORCH_INTERNAL_ASSERT(false,
15442 "mutating a non-functional tensor with a functional tensor is not allowed.",
15443 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
15444 } else {
15445 // case 2: arguments are not functional tensors, so we no-op and redispatch.
15446 at::AutoDispatchSkipFunctionalize guard;
15447 at::_ops::_foreach_addcmul__ScalarList::call(self_, tensor1_, tensor2_, scalars);
15449 }
15450 } else {
15451 ::std::vector<at::Tensor> tmp_output;
15452 {
15453 at::AutoDispatchSkipFunctionalize guard;
15454 tmp_output = at::_ops::_foreach_addcmul_ScalarList::call(self_, tensor1_, tensor2_, scalars);
15455 }
15456 at::functionalization::impl::replace_(self, tmp_output);
15457 at::functionalization::impl::commit_update(self);
15458 at::functionalization::impl::sync(self);
15459
15460 }
15461 }
15462
15463 void _foreach_addcmul_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) {
15464 if (false) {
15465 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
15466 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
15467        // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
15468 auto self_meta = to_meta(self);
15469 auto tensor1_meta = to_meta(tensor1);
15470 auto tensor2_meta = to_meta(tensor2);
15471 auto scalars_meta = to_meta(scalars);
15472 auto out_meta = to_meta(out);
15473 at::AutoDispatchSkipFunctionalize func_guard;
15474 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
15475 at::_ops::_foreach_addcmul_Tensor_out::call(self_meta, tensor1_meta, tensor2_meta, scalars_meta, out_meta);
15476 }
15477
15478 ::std::vector<at::Tensor> self_;
15479 if (at::functionalization::impl::isFunctionalTensor(self)) {
15480 at::functionalization::impl::sync(self);
15481 self_ = at::functionalization::impl::from_functional_tensor(self);
15482 } else {
15483 self_ = self.vec();
15484 }
15485
15486 ::std::vector<at::Tensor> tensor1_;
15487 if (at::functionalization::impl::isFunctionalTensor(tensor1)) {
15488 at::functionalization::impl::sync(tensor1);
15489 tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1);
15490 } else {
15491 tensor1_ = tensor1.vec();
15492 }
15493
15494 ::std::vector<at::Tensor> tensor2_;
15495 if (at::functionalization::impl::isFunctionalTensor(tensor2)) {
15496 at::functionalization::impl::sync(tensor2);
15497 tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2);
15498 } else {
15499 tensor2_ = tensor2.vec();
15500 }
15501
15502 at::Tensor scalars_;
15503 if (at::functionalization::impl::isFunctionalTensor(scalars)) {
15504 at::functionalization::impl::sync(scalars);
15505 scalars_ = at::functionalization::impl::from_functional_tensor(scalars);
15506 } else {
15507 scalars_ = scalars;
15508 }
15509
15510 ::std::vector<at::Tensor> out_;
15511 if (at::functionalization::impl::isFunctionalTensor(out)) {
15512 at::functionalization::impl::sync(out);
15513 out_ = at::functionalization::impl::from_functional_tensor(out);
15514 } else {
15515 out_ = out.vec();
15516 }
15517 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
15518 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2) || at::functionalization::impl::isFunctionalTensor(scalars))) {
15519          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
15520 TORCH_INTERNAL_ASSERT(false,
15521 "mutating a non-functional tensor with a functional tensor is not allowed.",
15522 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
15523 } else {
15524 // case 2: arguments are not functional tensors, so we no-op and redispatch.
15525 at::AutoDispatchSkipFunctionalize guard;
15526 at::_ops::_foreach_addcmul_Tensor_out::call(self_, tensor1_, tensor2_, scalars_, out_);
15528 }
15529 } else {
15530 ::std::vector<at::Tensor> tmp_output;
15531 {
15532 at::AutoDispatchSkipFunctionalize guard;
15533 tmp_output = at::_ops::_foreach_addcmul_Tensor::call(self_, tensor1_, tensor2_, scalars_);
15534 }
15535 at::functionalization::impl::replace_(out, tmp_output);
15536 at::functionalization::impl::commit_update(out);
15537 at::functionalization::impl::sync(out);
15538
15539 }
15540 }
15541
15542 void _foreach_addcmul__Tensor(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
15543 if (true) {
15544 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
15545 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
15546        // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
15547 auto self_meta = to_meta(self);
15548 auto tensor1_meta = to_meta(tensor1);
15549 auto tensor2_meta = to_meta(tensor2);
15550 auto scalars_meta = to_meta(scalars);
15551 at::AutoDispatchSkipFunctionalize func_guard;
15552 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
15553 at::_ops::_foreach_addcmul__Tensor::call(self_meta, tensor1_meta, tensor2_meta, scalars_meta);
15554 }
15555
15556 ::std::vector<at::Tensor> self_;
15557 if (at::functionalization::impl::isFunctionalTensor(self)) {
15558 at::functionalization::impl::sync(self);
15559 self_ = at::functionalization::impl::from_functional_tensor(self);
15560 } else {
15561 self_ = self.vec();
15562 }
15563
15564 ::std::vector<at::Tensor> tensor1_;
15565 if (at::functionalization::impl::isFunctionalTensor(tensor1)) {
15566 at::functionalization::impl::sync(tensor1);
15567 tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1);
15568 } else {
15569 tensor1_ = tensor1.vec();
15570 }
15571
15572 ::std::vector<at::Tensor> tensor2_;
15573 if (at::functionalization::impl::isFunctionalTensor(tensor2)) {
15574 at::functionalization::impl::sync(tensor2);
15575 tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2);
15576 } else {
15577 tensor2_ = tensor2.vec();
15578 }
15579
15580 at::Tensor scalars_;
15581 if (at::functionalization::impl::isFunctionalTensor(scalars)) {
15582 at::functionalization::impl::sync(scalars);
15583 scalars_ = at::functionalization::impl::from_functional_tensor(scalars);
15584 } else {
15585 scalars_ = scalars;
15586 }
15587 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
15588 if ((false || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2) || at::functionalization::impl::isFunctionalTensor(scalars))) {
15589          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
15590 TORCH_INTERNAL_ASSERT(false,
15591 "mutating a non-functional tensor with a functional tensor is not allowed.",
15592 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
15593 } else {
15594 // case 2: arguments are not functional tensors, so we no-op and redispatch.
15595 at::AutoDispatchSkipFunctionalize guard;
15596 at::_ops::_foreach_addcmul__Tensor::call(self_, tensor1_, tensor2_, scalars_);
15598 }
15599 } else {
15600 ::std::vector<at::Tensor> tmp_output;
15601 {
15602 at::AutoDispatchSkipFunctionalize guard;
15603 tmp_output = at::_ops::_foreach_addcmul_Tensor::call(self_, tensor1_, tensor2_, scalars_);
15604 }
15605 at::functionalization::impl::replace_(self, tmp_output);
15606 at::functionalization::impl::commit_update(self);
15607 at::functionalization::impl::sync(self);
15608
15609 }
15610 }
15611
15612 at::Tensor & bucketize_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right, at::Tensor & out) {
15613 if (false) {
15614 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
15615 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
15616        // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
15617 auto self_meta = to_meta(self);
15618 auto boundaries_meta = to_meta(boundaries);
15619 auto out_meta = to_meta(out);
15620 at::AutoDispatchSkipFunctionalize func_guard;
15621 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
15622 at::_ops::bucketize_Tensor_out::call(self_meta, boundaries_meta, out_int32, right, out_meta);
15623 }
15624
15625 at::Tensor self_;
15626 if (at::functionalization::impl::isFunctionalTensor(self)) {
15627 at::functionalization::impl::sync(self);
15628 self_ = at::functionalization::impl::from_functional_tensor(self);
15629 } else {
15630 self_ = self;
15631 }
15632
15633 at::Tensor boundaries_;
15634 if (at::functionalization::impl::isFunctionalTensor(boundaries)) {
15635 at::functionalization::impl::sync(boundaries);
15636 boundaries_ = at::functionalization::impl::from_functional_tensor(boundaries);
15637 } else {
15638 boundaries_ = boundaries;
15639 }
15640
15641 at::Tensor out_;
15642 if (at::functionalization::impl::isFunctionalTensor(out)) {
15643 at::functionalization::impl::sync(out);
15644 out_ = at::functionalization::impl::from_functional_tensor(out);
15645 } else {
15646 out_ = out;
15647 }
15648 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
15649 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(boundaries))) {
15650          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
15651 TORCH_INTERNAL_ASSERT(false,
15652 "mutating a non-functional tensor with a functional tensor is not allowed.",
15653 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
15654 } else {
15655 // case 2: arguments are not functional tensors, so we no-op and redispatch.
15656 at::AutoDispatchSkipFunctionalize guard;
15657 at::Tensor tmp_output = at::_ops::bucketize_Tensor_out::call(self_, boundaries_, out_int32, right, out_);
15658          return out;
15659 }
15660 } else {
15661 at::Tensor tmp_output;
15662 {
15663 at::AutoDispatchSkipFunctionalize guard;
15664 tmp_output = at::_ops::bucketize_Tensor::call(self_, boundaries_, out_int32, right);
15665 }
15666 at::functionalization::impl::replace_(out, tmp_output);
15667 at::functionalization::impl::commit_update(out);
15668 at::functionalization::impl::sync(out);
15669 return out;
15670 }
15671 }
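
// Note: kernels whose schema returns Tensor(a!) (bucketize_out_Tensor_out above
// and the loss kernels below) also return `out` from both branches, so a
// functionalized caller can chain the result exactly like the eager out= API.
// Hedged, untested sketch (placeholder tensors `input`, `boundaries`, `result`;
// assumes the generated at::bucketize_out C++ wrapper):
//
//   at::Tensor& same_as_result = at::bucketize_out(result, input, boundaries);
//   // Under functionalization, `result`'s wrapper now holds the output of the
//   // functional bucketize.Tensor op, and `same_as_result` is just `result`.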
15672
15673 at::Tensor & bucketize_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & boundaries, bool out_int32, bool right, at::Tensor & out) {
15674 if (false) {
15675 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
15676 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
15677        // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
15678 auto boundaries_meta = to_meta(boundaries);
15679 auto out_meta = to_meta(out);
15680 at::AutoDispatchSkipFunctionalize func_guard;
15681 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
15682 at::_ops::bucketize_Scalar_out::call(self, boundaries_meta, out_int32, right, out_meta);
15683 }
15684
15685 at::Tensor boundaries_;
15686 if (at::functionalization::impl::isFunctionalTensor(boundaries)) {
15687 at::functionalization::impl::sync(boundaries);
15688 boundaries_ = at::functionalization::impl::from_functional_tensor(boundaries);
15689 } else {
15690 boundaries_ = boundaries;
15691 }
15692
15693 at::Tensor out_;
15694 if (at::functionalization::impl::isFunctionalTensor(out)) {
15695 at::functionalization::impl::sync(out);
15696 out_ = at::functionalization::impl::from_functional_tensor(out);
15697 } else {
15698 out_ = out;
15699 }
15700 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
15701 if ((false || at::functionalization::impl::isFunctionalTensor(boundaries))) {
15702          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
15703 TORCH_INTERNAL_ASSERT(false,
15704 "mutating a non-functional tensor with a functional tensor is not allowed.",
15705 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
15706 } else {
15707 // case 2: arguments are not functional tensors, so we no-op and redispatch.
15708 at::AutoDispatchSkipFunctionalize guard;
15709 at::Tensor tmp_output = at::_ops::bucketize_Scalar_out::call(self, boundaries_, out_int32, right, out_);
15710          return out;
15711 }
15712 } else {
15713 at::Tensor tmp_output;
15714 {
15715 at::AutoDispatchSkipFunctionalize guard;
15716 tmp_output = at::_ops::bucketize_Scalar::call(self, boundaries_, out_int32, right);
15717 }
15718 at::functionalization::impl::replace_(out, tmp_output);
15719 at::functionalization::impl::commit_update(out);
15720 at::functionalization::impl::sync(out);
15721 return out;
15722 }
15723 }
15724
15725 at::Tensor & mse_loss_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {
15726 if (false) {
15727 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
15728 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
15729        // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
15730 auto self_meta = to_meta(self);
15731 auto target_meta = to_meta(target);
15732 auto out_meta = to_meta(out);
15733 at::AutoDispatchSkipFunctionalize func_guard;
15734 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
15735 at::_ops::mse_loss_out::call(self_meta, target_meta, reduction, out_meta);
15736 }
15737
15738 at::Tensor self_;
15739 if (at::functionalization::impl::isFunctionalTensor(self)) {
15740 at::functionalization::impl::sync(self);
15741 self_ = at::functionalization::impl::from_functional_tensor(self);
15742 } else {
15743 self_ = self;
15744 }
15745
15746 at::Tensor target_;
15747 if (at::functionalization::impl::isFunctionalTensor(target)) {
15748 at::functionalization::impl::sync(target);
15749 target_ = at::functionalization::impl::from_functional_tensor(target);
15750 } else {
15751 target_ = target;
15752 }
15753
15754 at::Tensor out_;
15755 if (at::functionalization::impl::isFunctionalTensor(out)) {
15756 at::functionalization::impl::sync(out);
15757 out_ = at::functionalization::impl::from_functional_tensor(out);
15758 } else {
15759 out_ = out;
15760 }
15761 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
15762 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target))) {
15763          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
15764 TORCH_INTERNAL_ASSERT(false,
15765 "mutating a non-functional tensor with a functional tensor is not allowed.",
15766 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
15767 } else {
15768 // case 2: arguments are not functional tensors, so we no-op and redispatch.
15769 at::AutoDispatchSkipFunctionalize guard;
15770 at::Tensor tmp_output = at::_ops::mse_loss_out::call(self_, target_, reduction, out_);
15771          return out;
15772 }
15773 } else {
15774 at::Tensor tmp_output;
15775 {
15776 at::AutoDispatchSkipFunctionalize guard;
15777 tmp_output = at::_ops::mse_loss::call(self_, target_, reduction);
15778 }
15779 at::functionalization::impl::replace_(out, tmp_output);
15780 at::functionalization::impl::commit_update(out);
15781 at::functionalization::impl::sync(out);
15782 return out;
15783 }
15784 }
15785
15786 at::Tensor & multi_margin_loss_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out) {
15787 if (false) {
15788 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
15789 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
15790        // (We can only do this for inplace ops today, though, because they technically all support meta tensors).
15791 auto self_meta = to_meta(self);
15792 auto target_meta = to_meta(target);
15793 auto weight_meta = to_meta(weight);
15794 auto out_meta = to_meta(out);
15795 at::AutoDispatchSkipFunctionalize func_guard;
15796 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
15797 at::_ops::multi_margin_loss_out::call(self_meta, target_meta, p, margin, weight_meta, reduction, out_meta);
15798 }
15799
15800 at::Tensor self_;
15801 if (at::functionalization::impl::isFunctionalTensor(self)) {
15802 at::functionalization::impl::sync(self);
15803 self_ = at::functionalization::impl::from_functional_tensor(self);
15804 } else {
15805 self_ = self;
15806 }
15807
15808 at::Tensor target_;
15809 if (at::functionalization::impl::isFunctionalTensor(target)) {
15810 at::functionalization::impl::sync(target);
15811 target_ = at::functionalization::impl::from_functional_tensor(target);
15812 } else {
15813 target_ = target;
15814 }
15815
15816 c10::optional<at::Tensor> weight_;
15817 if (at::functionalization::impl::isFunctionalTensor(weight)) {
15818 at::functionalization::impl::sync(weight);
15819 weight_ = at::functionalization::impl::from_functional_tensor(weight);
15820 } else {
15821 weight_ = weight;
15822 }
15823
15824 at::Tensor out_;
15825 if (at::functionalization::impl::isFunctionalTensor(out)) {
15826 at::functionalization::impl::sync(out);
15827 out_ = at::functionalization::impl::from_functional_tensor(out);
15828 } else {
15829 out_ = out;
15830 }
15831 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
15832 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target) || at::functionalization::impl::isFunctionalTensor(weight))) {
15833 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
15834 TORCH_INTERNAL_ASSERT(false,
15835 "mutating a non-functional tensor with a functional tensor is not allowed.",
15836 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
15837 } else {
15838 // case 2: arguments are not functional tensors, so we no-op and redispatch.
15839 at::AutoDispatchSkipFunctionalize guard;
15840 at::Tensor tmp_output = at::_ops::multi_margin_loss_out::call(self_, target_, p, margin, weight_, reduction, out_);
15841 return out;
15842 }
15843 } else {
15844 at::Tensor tmp_output;
15845 {
15846 at::AutoDispatchSkipFunctionalize guard;
15847 tmp_output = at::_ops::multi_margin_loss::call(self_, target_, p, margin, weight_, reduction);
15848 }
15849 at::functionalization::impl::replace_(out, tmp_output);
15850 at::functionalization::impl::commit_update(out);
15851 at::functionalization::impl::sync(out);
15852 return out;
15853 }
15854 }
15855
15856 at::Tensor & multilabel_margin_loss_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {
15857 if (false) {
15858 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
15859 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
15860 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
15861 auto self_meta = to_meta(self);
15862 auto target_meta = to_meta(target);
15863 auto out_meta = to_meta(out);
15864 at::AutoDispatchSkipFunctionalize func_guard;
15865 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
15866 at::_ops::multilabel_margin_loss_out::call(self_meta, target_meta, reduction, out_meta);
15867 }
15868
15869 at::Tensor self_;
15870 if (at::functionalization::impl::isFunctionalTensor(self)) {
15871 at::functionalization::impl::sync(self);
15872 self_ = at::functionalization::impl::from_functional_tensor(self);
15873 } else {
15874 self_ = self;
15875 }
15876
15877 at::Tensor target_;
15878 if (at::functionalization::impl::isFunctionalTensor(target)) {
15879 at::functionalization::impl::sync(target);
15880 target_ = at::functionalization::impl::from_functional_tensor(target);
15881 } else {
15882 target_ = target;
15883 }
15884
15885 at::Tensor out_;
15886 if (at::functionalization::impl::isFunctionalTensor(out)) {
15887 at::functionalization::impl::sync(out);
15888 out_ = at::functionalization::impl::from_functional_tensor(out);
15889 } else {
15890 out_ = out;
15891 }
15892 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
15893 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target))) {
15894 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
15895 TORCH_INTERNAL_ASSERT(false,
15896 "mutating a non-functional tensor with a functional tensor is not allowed.",
15897 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
15898 } else {
15899 // case 2: arguments are not functional tensors, so we no-op and redispatch.
15900 at::AutoDispatchSkipFunctionalize guard;
15901 at::Tensor tmp_output = at::_ops::multilabel_margin_loss_out::call(self_, target_, reduction, out_);
15902 return out;
15903 }
15904 } else {
15905 at::Tensor tmp_output;
15906 {
15907 at::AutoDispatchSkipFunctionalize guard;
15908 tmp_output = at::_ops::multilabel_margin_loss::call(self_, target_, reduction);
15909 }
15910 at::functionalization::impl::replace_(out, tmp_output);
15911 at::functionalization::impl::commit_update(out);
15912 at::functionalization::impl::sync(out);
15913 return out;
15914 }
15915 }
15916
15917 ::std::tuple<at::Tensor &,at::Tensor &> multilabel_margin_loss_forward_out_output(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & output, at::Tensor & is_target) {
15918 if (false) {
15919 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
15920 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
15921 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
15922 auto self_meta = to_meta(self);
15923 auto target_meta = to_meta(target);
15924 auto output_meta = to_meta(output);
15925 auto is_target_meta = to_meta(is_target);
15926 at::AutoDispatchSkipFunctionalize func_guard;
15927 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
15928 at::_ops::multilabel_margin_loss_forward_output::call(self_meta, target_meta, reduction, output_meta, is_target_meta);
15929 }
15930
15931 at::Tensor self_;
15932 if (at::functionalization::impl::isFunctionalTensor(self)) {
15933 at::functionalization::impl::sync(self);
15934 self_ = at::functionalization::impl::from_functional_tensor(self);
15935 } else {
15936 self_ = self;
15937 }
15938
15939 at::Tensor target_;
15940 if (at::functionalization::impl::isFunctionalTensor(target)) {
15941 at::functionalization::impl::sync(target);
15942 target_ = at::functionalization::impl::from_functional_tensor(target);
15943 } else {
15944 target_ = target;
15945 }
15946
15947 at::Tensor output_;
15948 if (at::functionalization::impl::isFunctionalTensor(output)) {
15949 at::functionalization::impl::sync(output);
15950 output_ = at::functionalization::impl::from_functional_tensor(output);
15951 } else {
15952 output_ = output;
15953 }
15954
15955 at::Tensor is_target_;
15956 if (at::functionalization::impl::isFunctionalTensor(is_target)) {
15957 at::functionalization::impl::sync(is_target);
15958 is_target_ = at::functionalization::impl::from_functional_tensor(is_target);
15959 } else {
15960 is_target_ = is_target;
15961 }
15962 if (!(true && at::functionalization::impl::isFunctionalTensor(output) && at::functionalization::impl::isFunctionalTensor(is_target))) {
15963 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target))) {
15964 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
15965 TORCH_INTERNAL_ASSERT(false,
15966 "mutating a non-functional tensor with a functional tensor is not allowed.",
15967 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
15968 } else {
15969 // case 2: arguments are not functional tensors, so we no-op and redispatch.
15970 at::AutoDispatchSkipFunctionalize guard;
15971 ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::multilabel_margin_loss_forward_output::call(self_, target_, reduction, output_, is_target_);
15972 return ::std::tuple<at::Tensor &,at::Tensor &>(output, is_target);
15973 }
15974 } else {
15975 ::std::tuple<at::Tensor,at::Tensor> tmp_output;
15976 {
15977 at::AutoDispatchSkipFunctionalize guard;
15978 tmp_output = at::_ops::multilabel_margin_loss_forward::call(self_, target_, reduction);
15979 }
15980 at::functionalization::impl::replace_(output, std::get<0>(tmp_output));
15981 at::functionalization::impl::commit_update(output);
15982 at::functionalization::impl::sync(output);
15983 at::functionalization::impl::replace_(is_target, std::get<1>(tmp_output));
15984 at::functionalization::impl::commit_update(is_target);
15985 at::functionalization::impl::sync(is_target);
15986 return ::std::tuple<at::Tensor &,at::Tensor &>(output, is_target);
15987 }
15988 }
15989
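// ---------------------------------------------------------------------------------
// Editorial note (hand-written, not emitted by torchgen): multi-output out= kernels
// like the one above commit each returned tensor into its own wrapper. A minimal
// caller-side sketch, under the same assumptions as the earlier example (made-up
// helper name, to_functional_tensor from <ATen/FunctionalTensorWrapper.h>):
[[maybe_unused]] static ::std::tuple<at::Tensor,at::Tensor> example_multilabel_margin_loss_forward_out_under_functionalization(
    const at::Tensor& self, const at::Tensor& target, int64_t reduction,
    const at::Tensor& output, const at::Tensor& is_target) {
  at::Tensor self_f = at::functionalization::impl::to_functional_tensor(self);
  at::Tensor target_f = at::functionalization::impl::to_functional_tensor(target);
  at::Tensor output_f = at::functionalization::impl::to_functional_tensor(output);
  at::Tensor is_target_f = at::functionalization::impl::to_functional_tensor(is_target);
  // Both outs are functional, so the kernel above runs the functional forward op and
  // writes one result into each wrapper.
  at::_ops::multilabel_margin_loss_forward_output::call(self_f, target_f, reduction, output_f, is_target_f);
  at::functionalization::impl::sync(output_f);
  at::functionalization::impl::sync(is_target_f);
  return ::std::tuple<at::Tensor,at::Tensor>(
      at::functionalization::impl::from_functional_tensor(output_f),
      at::functionalization::impl::from_functional_tensor(is_target_f));
}
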
15990 ::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_out_output(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight) {
15991 if (false) {
15992 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
15993 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
15994 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
15995 auto self_meta = to_meta(self);
15996 auto target_meta = to_meta(target);
15997 auto weight_meta = to_meta(weight);
15998 auto output_meta = to_meta(output);
15999 auto total_weight_meta = to_meta(total_weight);
16000 at::AutoDispatchSkipFunctionalize func_guard;
16001 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
16002 at::_ops::nll_loss2d_forward_output::call(self_meta, target_meta, weight_meta, reduction, ignore_index, output_meta, total_weight_meta);
16003 }
16004
16005 at::Tensor self_;
16006 if (at::functionalization::impl::isFunctionalTensor(self)) {
16007 at::functionalization::impl::sync(self);
16008 self_ = at::functionalization::impl::from_functional_tensor(self);
16009 } else {
16010 self_ = self;
16011 }
16012
16013 at::Tensor target_;
16014 if (at::functionalization::impl::isFunctionalTensor(target)) {
16015 at::functionalization::impl::sync(target);
16016 target_ = at::functionalization::impl::from_functional_tensor(target);
16017 } else {
16018 target_ = target;
16019 }
16020
16021 c10::optional<at::Tensor> weight_;
16022 if (at::functionalization::impl::isFunctionalTensor(weight)) {
16023 at::functionalization::impl::sync(weight);
16024 weight_ = at::functionalization::impl::from_functional_tensor(weight);
16025 } else {
16026 weight_ = weight;
16027 }
16028
16029 at::Tensor output_;
16030 if (at::functionalization::impl::isFunctionalTensor(output)) {
16031 at::functionalization::impl::sync(output);
16032 output_ = at::functionalization::impl::from_functional_tensor(output);
16033 } else {
16034 output_ = output;
16035 }
16036
16037 at::Tensor total_weight_;
16038 if (at::functionalization::impl::isFunctionalTensor(total_weight)) {
16039 at::functionalization::impl::sync(total_weight);
16040 total_weight_ = at::functionalization::impl::from_functional_tensor(total_weight);
16041 } else {
16042 total_weight_ = total_weight;
16043 }
16044 if (!(true && at::functionalization::impl::isFunctionalTensor(output) && at::functionalization::impl::isFunctionalTensor(total_weight))) {
16045 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target) || at::functionalization::impl::isFunctionalTensor(weight))) {
16046 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
16047 TORCH_INTERNAL_ASSERT(false,
16048 "mutating a non-functional tensor with a functional tensor is not allowed.",
16049 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
16050 } else {
16051 // case 2: arguments are not functional tensors, so we no-op and redispatch.
16052 at::AutoDispatchSkipFunctionalize guard;
16053 ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::nll_loss2d_forward_output::call(self_, target_, weight_, reduction, ignore_index, output_, total_weight_);
16054 return ::std::tuple<at::Tensor &,at::Tensor &>(output, total_weight);
16055 }
16056 } else {
16057 ::std::tuple<at::Tensor,at::Tensor> tmp_output;
16058 {
16059 at::AutoDispatchSkipFunctionalize guard;
16060 tmp_output = at::_ops::nll_loss2d_forward::call(self_, target_, weight_, reduction, ignore_index);
16061 }
16062 at::functionalization::impl::replace_(output, std::get<0>(tmp_output));
16063 at::functionalization::impl::commit_update(output);
16064 at::functionalization::impl::sync(output);
16065 at::functionalization::impl::replace_(total_weight, std::get<1>(tmp_output));
16066 at::functionalization::impl::commit_update(total_weight);
16067 at::functionalization::impl::sync(total_weight);
16068 return ::std::tuple<at::Tensor &,at::Tensor &>(output, total_weight);
16069 }
16070 }
16071
16072 at::Tensor & soft_margin_loss_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {
16073 if (false) {
16074 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
16075 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
16076 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16077 auto self_meta = to_meta(self);
16078 auto target_meta = to_meta(target);
16079 auto out_meta = to_meta(out);
16080 at::AutoDispatchSkipFunctionalize func_guard;
16081 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
16082 at::_ops::soft_margin_loss_out::call(self_meta, target_meta, reduction, out_meta);
16083 }
16084
16085 at::Tensor self_;
16086 if (at::functionalization::impl::isFunctionalTensor(self)) {
16087 at::functionalization::impl::sync(self);
16088 self_ = at::functionalization::impl::from_functional_tensor(self);
16089 } else {
16090 self_ = self;
16091 }
16092
16093 at::Tensor target_;
16094 if (at::functionalization::impl::isFunctionalTensor(target)) {
16095 at::functionalization::impl::sync(target);
16096 target_ = at::functionalization::impl::from_functional_tensor(target);
16097 } else {
16098 target_ = target;
16099 }
16100
16101 at::Tensor out_;
16102 if (at::functionalization::impl::isFunctionalTensor(out)) {
16103 at::functionalization::impl::sync(out);
16104 out_ = at::functionalization::impl::from_functional_tensor(out);
16105 } else {
16106 out_ = out;
16107 }
16108 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
16109 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target))) {
16110 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
16111 TORCH_INTERNAL_ASSERT(false,
16112 "mutating a non-functional tensor with a functional tensor is not allowed.",
16113 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
16114 } else {
16115 // case 2: arguments are not functional tensors, so we no-op and redispatch.
16116 at::AutoDispatchSkipFunctionalize guard;
16117 at::Tensor tmp_output = at::_ops::soft_margin_loss_out::call(self_, target_, reduction, out_);
16118 return out;
16119 }
16120 } else {
16121 at::Tensor tmp_output;
16122 {
16123 at::AutoDispatchSkipFunctionalize guard;
16124 tmp_output = at::_ops::soft_margin_loss::call(self_, target_, reduction);
16125 }
16126 at::functionalization::impl::replace_(out, tmp_output);
16127 at::functionalization::impl::commit_update(out);
16128 at::functionalization::impl::sync(out);
16129 return out;
16130 }
16131 }
16132
16133 at::Tensor & glu_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, at::Tensor & grad_input) {
16134 if (false) {
16135 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
16136 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
16137 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16138 auto grad_output_meta = to_meta(grad_output);
16139 auto self_meta = to_meta(self);
16140 auto grad_input_meta = to_meta(grad_input);
16141 at::AutoDispatchSkipFunctionalize func_guard;
16142 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
16143 at::_ops::glu_backward_grad_input::call(grad_output_meta, self_meta, dim, grad_input_meta);
16144 }
16145
16146 at::Tensor grad_output_;
16147 if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
16148 at::functionalization::impl::sync(grad_output);
16149 grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
16150 } else {
16151 grad_output_ = grad_output;
16152 }
16153
16154 at::Tensor self_;
16155 if (at::functionalization::impl::isFunctionalTensor(self)) {
16156 at::functionalization::impl::sync(self);
16157 self_ = at::functionalization::impl::from_functional_tensor(self);
16158 } else {
16159 self_ = self;
16160 }
16161
16162 at::Tensor grad_input_;
16163 if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
16164 at::functionalization::impl::sync(grad_input);
16165 grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
16166 } else {
16167 grad_input_ = grad_input;
16168 }
16169 if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
16170 if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) {
16171 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
16172 TORCH_INTERNAL_ASSERT(false,
16173 "mutating a non-functional tensor with a functional tensor is not allowed.",
16174 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
16175 } else {
16176 // case 2: arguments are not functional tensors, so we no-op and redispatch.
16177 at::AutoDispatchSkipFunctionalize guard;
16178 at::Tensor tmp_output = at::_ops::glu_backward_grad_input::call(grad_output_, self_, dim, grad_input_);
16179 return grad_input;
16180 }
16181 } else {
16182 at::Tensor tmp_output;
16183 {
16184 at::AutoDispatchSkipFunctionalize guard;
16185 tmp_output = at::_ops::glu_backward::call(grad_output_, self_, dim);
16186 }
16187 at::functionalization::impl::replace_(grad_input, tmp_output);
16188 at::functionalization::impl::commit_update(grad_input);
16189 at::functionalization::impl::sync(grad_input);
16190 return grad_input;
16191 }
16192 }
16193
16194 at::Tensor & glu_backward_jvp_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim, at::Tensor & out) {
16195 if (false) {
16196 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
16197 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
16198 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16199 auto grad_x_meta = to_meta(grad_x);
16200 auto grad_glu_meta = to_meta(grad_glu);
16201 auto x_meta = to_meta(x);
16202 auto dgrad_glu_meta = to_meta(dgrad_glu);
16203 auto dx_meta = to_meta(dx);
16204 auto out_meta = to_meta(out);
16205 at::AutoDispatchSkipFunctionalize func_guard;
16206 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
16207 at::_ops::glu_backward_jvp_out::call(grad_x_meta, grad_glu_meta, x_meta, dgrad_glu_meta, dx_meta, dim, out_meta);
16208 }
16209
16210 at::Tensor grad_x_;
16211 if (at::functionalization::impl::isFunctionalTensor(grad_x)) {
16212 at::functionalization::impl::sync(grad_x);
16213 grad_x_ = at::functionalization::impl::from_functional_tensor(grad_x);
16214 } else {
16215 grad_x_ = grad_x;
16216 }
16217
16218 at::Tensor grad_glu_;
16219 if (at::functionalization::impl::isFunctionalTensor(grad_glu)) {
16220 at::functionalization::impl::sync(grad_glu);
16221 grad_glu_ = at::functionalization::impl::from_functional_tensor(grad_glu);
16222 } else {
16223 grad_glu_ = grad_glu;
16224 }
16225
16226 at::Tensor x_;
16227 if (at::functionalization::impl::isFunctionalTensor(x)) {
16228 at::functionalization::impl::sync(x);
16229 x_ = at::functionalization::impl::from_functional_tensor(x);
16230 } else {
16231 x_ = x;
16232 }
16233
16234 at::Tensor dgrad_glu_;
16235 if (at::functionalization::impl::isFunctionalTensor(dgrad_glu)) {
16236 at::functionalization::impl::sync(dgrad_glu);
16237 dgrad_glu_ = at::functionalization::impl::from_functional_tensor(dgrad_glu);
16238 } else {
16239 dgrad_glu_ = dgrad_glu;
16240 }
16241
16242 at::Tensor dx_;
16243 if (at::functionalization::impl::isFunctionalTensor(dx)) {
16244 at::functionalization::impl::sync(dx);
16245 dx_ = at::functionalization::impl::from_functional_tensor(dx);
16246 } else {
16247 dx_ = dx;
16248 }
16249
16250 at::Tensor out_;
16251 if (at::functionalization::impl::isFunctionalTensor(out)) {
16252 at::functionalization::impl::sync(out);
16253 out_ = at::functionalization::impl::from_functional_tensor(out);
16254 } else {
16255 out_ = out;
16256 }
16257 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
16258 if ((false || at::functionalization::impl::isFunctionalTensor(grad_x) || at::functionalization::impl::isFunctionalTensor(grad_glu) || at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(dgrad_glu) || at::functionalization::impl::isFunctionalTensor(dx))) {
16259 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
16260 TORCH_INTERNAL_ASSERT(false,
16261 "mutating a non-functional tensor with a functional tensor is not allowed.",
16262 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
16263 } else {
16264 // case 2: arguments are not functional tensors, so we no-op and redispatch.
16265 at::AutoDispatchSkipFunctionalize guard;
16266 at::Tensor tmp_output = at::_ops::glu_backward_jvp_out::call(grad_x_, grad_glu_, x_, dgrad_glu_, dx_, dim, out_);
16267 return out;
16268 }
16269 } else {
16270 at::Tensor tmp_output;
16271 {
16272 at::AutoDispatchSkipFunctionalize guard;
16273 tmp_output = at::_ops::glu_backward_jvp::call(grad_x_, grad_glu_, x_, dgrad_glu_, dx_, dim);
16274 }
16275 at::functionalization::impl::replace_(out, tmp_output);
16276 at::functionalization::impl::commit_update(out);
16277 at::functionalization::impl::sync(out);
16278 return out;
16279 }
16280 }
16281
16282 at::Tensor & hardtanh_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out) {
16283 if (false) {
16284 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
16285 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
16286 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16287 auto self_meta = to_meta(self);
16288 auto out_meta = to_meta(out);
16289 at::AutoDispatchSkipFunctionalize func_guard;
16290 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
16291 at::_ops::hardtanh_out::call(self_meta, min_val, max_val, out_meta);
16292 }
16293
16294 at::Tensor self_;
16295 if (at::functionalization::impl::isFunctionalTensor(self)) {
16296 at::functionalization::impl::sync(self);
16297 self_ = at::functionalization::impl::from_functional_tensor(self);
16298 } else {
16299 self_ = self;
16300 }
16301
16302 at::Tensor out_;
16303 if (at::functionalization::impl::isFunctionalTensor(out)) {
16304 at::functionalization::impl::sync(out);
16305 out_ = at::functionalization::impl::from_functional_tensor(out);
16306 } else {
16307 out_ = out;
16308 }
16309 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
16310 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
16311 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
16312 TORCH_INTERNAL_ASSERT(false,
16313 "mutating a non-functional tensor with a functional tensor is not allowed.",
16314 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
16315 } else {
16316 // case 2: arguments are not functional tensors, so we no-op and redispatch.
16317 at::AutoDispatchSkipFunctionalize guard;
16318 at::Tensor tmp_output = at::_ops::hardtanh_out::call(self_, min_val, max_val, out_);
16319 return out;
16320 }
16321 } else {
16322 at::Tensor tmp_output;
16323 {
16324 at::AutoDispatchSkipFunctionalize guard;
16325 tmp_output = at::_ops::hardtanh::call(self_, min_val, max_val);
16326 }
16327 at::functionalization::impl::replace_(out, tmp_output);
16328 at::functionalization::impl::commit_update(out);
16329 at::functionalization::impl::sync(out);
16330 return out;
16331 }
16332 }
16333
16334 at::Tensor & hardtanh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
16335 if (true) {
16336 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
16337 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
16338 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16339 auto self_meta = to_meta(self);
16340 at::AutoDispatchSkipFunctionalize func_guard;
16341 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
16342 at::_ops::hardtanh_::call(self_meta, min_val, max_val);
16343 }
16344
16345 at::Tensor self_;
16346 if (at::functionalization::impl::isFunctionalTensor(self)) {
16347 at::functionalization::impl::sync(self);
16348 self_ = at::functionalization::impl::from_functional_tensor(self);
16349 } else {
16350 self_ = self;
16351 }
16352 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
16353 if ((false)) {
16354 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
16355 TORCH_INTERNAL_ASSERT(false,
16356 "mutating a non-functional tensor with a functional tensor is not allowed.",
16357 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
16358 } else {
16359 // case 2: arguments are not functional tensors, so we no-op and redispatch.
16360 at::AutoDispatchSkipFunctionalize guard;
16361 at::Tensor tmp_output = at::_ops::hardtanh_::call(self_, min_val, max_val);
16362 return self;
16363 }
16364 } else {
16365 at::Tensor tmp_output;
16366 {
16367 at::AutoDispatchSkipFunctionalize guard;
16368 tmp_output = at::_ops::hardtanh::call(self_, min_val, max_val);
16369 }
16370 at::functionalization::impl::replace_(self, tmp_output);
16371 at::functionalization::impl::commit_update(self);
16372 at::functionalization::impl::sync(self);
16373 return self;
16374 }
16375 }
16376
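// ---------------------------------------------------------------------------------
// Editorial note (hand-written, not emitted by torchgen): the in-place kernels in this
// file, such as hardtanh_ above, never mutate the underlying storage. They call the
// functional variant and commit the new value into the FunctionalTensorWrapper for
// `self` (the meta-tensor pre-check in the `if (true)` block reproduces the shape
// checking the real in-place op would perform). A minimal caller-side sketch under the
// same assumptions as the earlier examples (made-up helper name, to_functional_tensor
// from <ATen/FunctionalTensorWrapper.h>):
[[maybe_unused]] static at::Tensor example_inplace_hardtanh_under_functionalization(
    const at::Tensor& self, const at::Scalar& min_val, const at::Scalar& max_val) {
  at::Tensor self_f = at::functionalization::impl::to_functional_tensor(self);
  // Routed to the hardtanh_ kernel above; `self` itself is left unchanged.
  at::_ops::hardtanh_::call(self_f, min_val, max_val);
  at::functionalization::impl::sync(self_f);
  return at::functionalization::impl::from_functional_tensor(self_f);
}
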
16377 at::Tensor & hardtanh_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & grad_input) {
16378 if (false) {
16379 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
16380 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
16381 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16382 auto grad_output_meta = to_meta(grad_output);
16383 auto self_meta = to_meta(self);
16384 auto grad_input_meta = to_meta(grad_input);
16385 at::AutoDispatchSkipFunctionalize func_guard;
16386 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
16387 at::_ops::hardtanh_backward_grad_input::call(grad_output_meta, self_meta, min_val, max_val, grad_input_meta);
16388 }
16389
16390 at::Tensor grad_output_;
16391 if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
16392 at::functionalization::impl::sync(grad_output);
16393 grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
16394 } else {
16395 grad_output_ = grad_output;
16396 }
16397
16398 at::Tensor self_;
16399 if (at::functionalization::impl::isFunctionalTensor(self)) {
16400 at::functionalization::impl::sync(self);
16401 self_ = at::functionalization::impl::from_functional_tensor(self);
16402 } else {
16403 self_ = self;
16404 }
16405
16406 at::Tensor grad_input_;
16407 if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
16408 at::functionalization::impl::sync(grad_input);
16409 grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
16410 } else {
16411 grad_input_ = grad_input;
16412 }
16413 if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
16414 if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) {
16415 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
16416 TORCH_INTERNAL_ASSERT(false,
16417 "mutating a non-functional tensor with a functional tensor is not allowed.",
16418 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
16419 } else {
16420 // case 2: arguments are not functional tensors, so we no-op and redispatch.
16421 at::AutoDispatchSkipFunctionalize guard;
16422 at::Tensor tmp_output = at::_ops::hardtanh_backward_grad_input::call(grad_output_, self_, min_val, max_val, grad_input_);
16423 return grad_input;
16424 }
16425 } else {
16426 at::Tensor tmp_output;
16427 {
16428 at::AutoDispatchSkipFunctionalize guard;
16429 tmp_output = at::_ops::hardtanh_backward::call(grad_output_, self_, min_val, max_val);
16430 }
16431 at::functionalization::impl::replace_(grad_input, tmp_output);
16432 at::functionalization::impl::commit_update(grad_input);
16433 at::functionalization::impl::sync(grad_input);
16434 return grad_input;
16435 }
16436 }
16437
16438 at::Tensor & leaky_relu_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out) {
16439 if (false) {
16440 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
16441 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
16442 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16443 auto self_meta = to_meta(self);
16444 auto out_meta = to_meta(out);
16445 at::AutoDispatchSkipFunctionalize func_guard;
16446 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
16447 at::_ops::leaky_relu_out::call(self_meta, negative_slope, out_meta);
16448 }
16449
16450 at::Tensor self_;
16451 if (at::functionalization::impl::isFunctionalTensor(self)) {
16452 at::functionalization::impl::sync(self);
16453 self_ = at::functionalization::impl::from_functional_tensor(self);
16454 } else {
16455 self_ = self;
16456 }
16457
16458 at::Tensor out_;
16459 if (at::functionalization::impl::isFunctionalTensor(out)) {
16460 at::functionalization::impl::sync(out);
16461 out_ = at::functionalization::impl::from_functional_tensor(out);
16462 } else {
16463 out_ = out;
16464 }
16465 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
16466 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
16467 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
16468 TORCH_INTERNAL_ASSERT(false,
16469 "mutating a non-functional tensor with a functional tensor is not allowed.",
16470 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
16471 } else {
16472 // case 2: arguments are not functional tensors, so we no-op and redispatch.
16473 at::AutoDispatchSkipFunctionalize guard;
16474 at::Tensor tmp_output = at::_ops::leaky_relu_out::call(self_, negative_slope, out_);
16475 return out;
16476 }
16477 } else {
16478 at::Tensor tmp_output;
16479 {
16480 at::AutoDispatchSkipFunctionalize guard;
16481 tmp_output = at::_ops::leaky_relu::call(self_, negative_slope);
16482 }
16483 at::functionalization::impl::replace_(out, tmp_output);
16484 at::functionalization::impl::commit_update(out);
16485 at::functionalization::impl::sync(out);
16486 return out;
16487 }
16488 }
16489
16490 at::Tensor & leaky_relu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & negative_slope) {
16491 if (true) {
16492 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
16493 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
16494 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16495 auto self_meta = to_meta(self);
16496 at::AutoDispatchSkipFunctionalize func_guard;
16497 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
16498 at::_ops::leaky_relu_::call(self_meta, negative_slope);
16499 }
16500
16501 at::Tensor self_;
16502 if (at::functionalization::impl::isFunctionalTensor(self)) {
16503 at::functionalization::impl::sync(self);
16504 self_ = at::functionalization::impl::from_functional_tensor(self);
16505 } else {
16506 self_ = self;
16507 }
16508 if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
16509 if ((false)) {
16510 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
16511 TORCH_INTERNAL_ASSERT(false,
16512 "mutating a non-functional tensor with a functional tensor is not allowed.",
16513 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
16514 } else {
16515 // case 2: arguments are not functional tensors, so we no-op and redispatch.
16516 at::AutoDispatchSkipFunctionalize guard;
16517 at::Tensor tmp_output = at::_ops::leaky_relu_::call(self_, negative_slope);
16518 return self;
16519 }
16520 } else {
16521 at::Tensor tmp_output;
16522 {
16523 at::AutoDispatchSkipFunctionalize guard;
16524 tmp_output = at::_ops::leaky_relu::call(self_, negative_slope);
16525 }
16526 at::functionalization::impl::replace_(self, tmp_output);
16527 at::functionalization::impl::commit_update(self);
16528 at::functionalization::impl::sync(self);
16529 return self;
16530 }
16531 }
16532
16533 at::Tensor & log_sigmoid_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input) {
16534 if (false) {
16535 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
16536 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
16537 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16538 auto grad_output_meta = to_meta(grad_output);
16539 auto self_meta = to_meta(self);
16540 auto buffer_meta = to_meta(buffer);
16541 auto grad_input_meta = to_meta(grad_input);
16542 at::AutoDispatchSkipFunctionalize func_guard;
16543 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
16544 at::_ops::log_sigmoid_backward_grad_input::call(grad_output_meta, self_meta, buffer_meta, grad_input_meta);
16545 }
16546
16547 at::Tensor grad_output_;
16548 if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
16549 at::functionalization::impl::sync(grad_output);
16550 grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
16551 } else {
16552 grad_output_ = grad_output;
16553 }
16554
16555 at::Tensor self_;
16556 if (at::functionalization::impl::isFunctionalTensor(self)) {
16557 at::functionalization::impl::sync(self);
16558 self_ = at::functionalization::impl::from_functional_tensor(self);
16559 } else {
16560 self_ = self;
16561 }
16562
16563 at::Tensor buffer_;
16564 if (at::functionalization::impl::isFunctionalTensor(buffer)) {
16565 at::functionalization::impl::sync(buffer);
16566 buffer_ = at::functionalization::impl::from_functional_tensor(buffer);
16567 } else {
16568 buffer_ = buffer;
16569 }
16570
16571 at::Tensor grad_input_;
16572 if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
16573 at::functionalization::impl::sync(grad_input);
16574 grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
16575 } else {
16576 grad_input_ = grad_input;
16577 }
16578 if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
16579 if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(buffer))) {
16580 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
16581 TORCH_INTERNAL_ASSERT(false,
16582 "mutating a non-functional tensor with a functional tensor is not allowed.",
16583 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
16584 } else {
16585 // case 2: arguments are not functional tensors, so we no-op and redispatch.
16586 at::AutoDispatchSkipFunctionalize guard;
16587 at::Tensor tmp_output = at::_ops::log_sigmoid_backward_grad_input::call(grad_output_, self_, buffer_, grad_input_);
16588 return grad_input;
16589 }
16590 } else {
16591 at::Tensor tmp_output;
16592 {
16593 at::AutoDispatchSkipFunctionalize guard;
16594 tmp_output = at::_ops::log_sigmoid_backward::call(grad_output_, self_, buffer_);
16595 }
16596 at::functionalization::impl::replace_(grad_input, tmp_output);
16597 at::functionalization::impl::commit_update(grad_input);
16598 at::functionalization::impl::sync(grad_input);
16599 return grad_input;
16600 }
16601 }
16602
16603 at::Tensor & softplus_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & out) {
16604 if (false) {
16605 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
16606 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
16607 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16608 auto self_meta = to_meta(self);
16609 auto out_meta = to_meta(out);
16610 at::AutoDispatchSkipFunctionalize func_guard;
16611 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
16612 at::_ops::softplus_out::call(self_meta, beta, threshold, out_meta);
16613 }
16614
16615 at::Tensor self_;
16616 if (at::functionalization::impl::isFunctionalTensor(self)) {
16617 at::functionalization::impl::sync(self);
16618 self_ = at::functionalization::impl::from_functional_tensor(self);
16619 } else {
16620 self_ = self;
16621 }
16622
16623 at::Tensor out_;
16624 if (at::functionalization::impl::isFunctionalTensor(out)) {
16625 at::functionalization::impl::sync(out);
16626 out_ = at::functionalization::impl::from_functional_tensor(out);
16627 } else {
16628 out_ = out;
16629 }
16630 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
16631 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
16632 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
16633 TORCH_INTERNAL_ASSERT(false,
16634 "mutating a non-functional tensor with a functional tensor is not allowed.",
16635 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
16636 } else {
16637 // case 2: arguments are not functional tensors, so we no-op and redispatch.
16638 at::AutoDispatchSkipFunctionalize guard;
16639 at::Tensor tmp_output = at::_ops::softplus_out::call(self_, beta, threshold, out_);
16640 return out;
16641 }
16642 } else {
16643 at::Tensor tmp_output;
16644 {
16645 at::AutoDispatchSkipFunctionalize guard;
16646 tmp_output = at::_ops::softplus::call(self_, beta, threshold);
16647 }
16648 at::functionalization::impl::replace_(out, tmp_output);
16649 at::functionalization::impl::commit_update(out);
16650 at::functionalization::impl::sync(out);
16651 return out;
16652 }
16653 }
16654
16655 at::Tensor & adaptive_avg_pool2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
16656 if (false) {
16657 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
16658 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
16659 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16660 auto self_meta = to_meta(self);
16661 auto out_meta = to_meta(out);
16662 at::AutoDispatchSkipFunctionalize func_guard;
16663 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
16664 at::_ops::adaptive_avg_pool2d_out::call(self_meta, output_size, out_meta);
16665 }
16666
16667 at::Tensor self_;
16668 if (at::functionalization::impl::isFunctionalTensor(self)) {
16669 at::functionalization::impl::sync(self);
16670 self_ = at::functionalization::impl::from_functional_tensor(self);
16671 } else {
16672 self_ = self;
16673 }
16674
16675 at::Tensor out_;
16676 if (at::functionalization::impl::isFunctionalTensor(out)) {
16677 at::functionalization::impl::sync(out);
16678 out_ = at::functionalization::impl::from_functional_tensor(out);
16679 } else {
16680 out_ = out;
16681 }
16682 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
16683 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
16684 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
16685 TORCH_INTERNAL_ASSERT(false,
16686 "mutating a non-functional tensor with a functional tensor is not allowed.",
16687 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
16688 } else {
16689 // case 2: arguments are not functional tensors, so we no-op and redispatch.
16690 at::AutoDispatchSkipFunctionalize guard;
16691 at::Tensor tmp_output = at::_ops::adaptive_avg_pool2d_out::call(self_, output_size, out_);
16692 return out;
16693 }
16694 } else {
16695 at::Tensor tmp_output;
16696 {
16697 at::AutoDispatchSkipFunctionalize guard;
16698 tmp_output = at::_ops::adaptive_avg_pool2d::call(self_, output_size);
16699 }
16700 at::functionalization::impl::replace_(out, tmp_output);
16701 at::functionalization::impl::commit_update(out);
16702 at::functionalization::impl::sync(out);
16703 return out;
16704 }
16705 }
16706
16707 at::Tensor & _adaptive_avg_pool3d_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
16708 if (false) {
16709 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
16710 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
16711 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16712 auto grad_output_meta = to_meta(grad_output);
16713 auto self_meta = to_meta(self);
16714 auto out_meta = to_meta(out);
16715 at::AutoDispatchSkipFunctionalize func_guard;
16716 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
16717 at::_ops::_adaptive_avg_pool3d_backward_out::call(grad_output_meta, self_meta, out_meta);
16718 }
16719
16720 at::Tensor grad_output_;
16721 if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
16722 at::functionalization::impl::sync(grad_output);
16723 grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
16724 } else {
16725 grad_output_ = grad_output;
16726 }
16727
16728 at::Tensor self_;
16729 if (at::functionalization::impl::isFunctionalTensor(self)) {
16730 at::functionalization::impl::sync(self);
16731 self_ = at::functionalization::impl::from_functional_tensor(self);
16732 } else {
16733 self_ = self;
16734 }
16735
16736 at::Tensor out_;
16737 if (at::functionalization::impl::isFunctionalTensor(out)) {
16738 at::functionalization::impl::sync(out);
16739 out_ = at::functionalization::impl::from_functional_tensor(out);
16740 } else {
16741 out_ = out;
16742 }
16743 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
16744 if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) {
16745 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
16746 TORCH_INTERNAL_ASSERT(false,
16747 "mutating a non-functional tensor with a functional tensor is not allowed.",
16748 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
16749 } else {
16750 // case 2: arguments are not functional tensors, so we no-op and redispatch.
16751 at::AutoDispatchSkipFunctionalize guard;
16752 at::Tensor tmp_output = at::_ops::_adaptive_avg_pool3d_backward_out::call(grad_output_, self_, out_);
16753 return out;
16754 }
16755 } else {
16756 at::Tensor tmp_output;
16757 {
16758 at::AutoDispatchSkipFunctionalize guard;
16759 tmp_output = at::_ops::_adaptive_avg_pool3d_backward::call(grad_output_, self_);
16760 }
16761 at::functionalization::impl::replace_(out, tmp_output);
16762 at::functionalization::impl::commit_update(out);
16763 at::functionalization::impl::sync(out);
16764 return out;
16765 }
16766 }
16767
16768 at::Tensor & adaptive_max_pool2d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input) {
16769 if (false) {
16770 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
16771 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
16772 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16773 auto grad_output_meta = to_meta(grad_output);
16774 auto self_meta = to_meta(self);
16775 auto indices_meta = to_meta(indices);
16776 auto grad_input_meta = to_meta(grad_input);
16777 at::AutoDispatchSkipFunctionalize func_guard;
16778 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
16779 at::_ops::adaptive_max_pool2d_backward_grad_input::call(grad_output_meta, self_meta, indices_meta, grad_input_meta);
16780 }
16781
16782 at::Tensor grad_output_;
16783 if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
16784 at::functionalization::impl::sync(grad_output);
16785 grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
16786 } else {
16787 grad_output_ = grad_output;
16788 }
16789
16790 at::Tensor self_;
16791 if (at::functionalization::impl::isFunctionalTensor(self)) {
16792 at::functionalization::impl::sync(self);
16793 self_ = at::functionalization::impl::from_functional_tensor(self);
16794 } else {
16795 self_ = self;
16796 }
16797
16798 at::Tensor indices_;
16799 if (at::functionalization::impl::isFunctionalTensor(indices)) {
16800 at::functionalization::impl::sync(indices);
16801 indices_ = at::functionalization::impl::from_functional_tensor(indices);
16802 } else {
16803 indices_ = indices;
16804 }
16805
16806 at::Tensor grad_input_;
16807 if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
16808 at::functionalization::impl::sync(grad_input);
16809 grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
16810 } else {
16811 grad_input_ = grad_input;
16812 }
16813 if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
16814 if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(indices))) {
16815 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
16816 TORCH_INTERNAL_ASSERT(false,
16817 "mutating a non-functional tensor with a functional tensor is not allowed.",
16818 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
16819 } else {
16820 // case 2: arguments are not functional tensors, so we no-op and redispatch.
16821 at::AutoDispatchSkipFunctionalize guard;
16822 at::Tensor tmp_output = at::_ops::adaptive_max_pool2d_backward_grad_input::call(grad_output_, self_, indices_, grad_input_);
16823 return grad_input;
16824 }
16825 } else {
16826 at::Tensor tmp_output;
16827 {
16828 at::AutoDispatchSkipFunctionalize guard;
16829 tmp_output = at::_ops::adaptive_max_pool2d_backward::call(grad_output_, self_, indices_);
16830 }
16831 at::functionalization::impl::replace_(grad_input, tmp_output);
16832 at::functionalization::impl::commit_update(grad_input);
16833 at::functionalization::impl::sync(grad_input);
16834 return grad_input;
16835 }
16836 }
16837
16838 at::Tensor & fractional_max_pool2d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input) {
16839 if (false) {
16840 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
16841 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
16842 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16843 auto grad_output_meta = to_meta(grad_output);
16844 auto self_meta = to_meta(self);
16845 auto indices_meta = to_meta(indices);
16846 auto grad_input_meta = to_meta(grad_input);
16847 at::AutoDispatchSkipFunctionalize func_guard;
16848 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
16849 at::_ops::fractional_max_pool2d_backward_grad_input::call(grad_output_meta, self_meta, kernel_size, output_size, indices_meta, grad_input_meta);
16850 }
16851
16852 at::Tensor grad_output_;
16853 if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
16854 at::functionalization::impl::sync(grad_output);
16855 grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
16856 } else {
16857 grad_output_ = grad_output;
16858 }
16859
16860 at::Tensor self_;
16861 if (at::functionalization::impl::isFunctionalTensor(self)) {
16862 at::functionalization::impl::sync(self);
16863 self_ = at::functionalization::impl::from_functional_tensor(self);
16864 } else {
16865 self_ = self;
16866 }
16867
16868 at::Tensor indices_;
16869 if (at::functionalization::impl::isFunctionalTensor(indices)) {
16870 at::functionalization::impl::sync(indices);
16871 indices_ = at::functionalization::impl::from_functional_tensor(indices);
16872 } else {
16873 indices_ = indices;
16874 }
16875
16876 at::Tensor grad_input_;
16877 if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
16878 at::functionalization::impl::sync(grad_input);
16879 grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
16880 } else {
16881 grad_input_ = grad_input;
16882 }
16883 if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
16884 if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(indices))) {
16885 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
16886 TORCH_INTERNAL_ASSERT(false,
16887 "mutating a non-functional tensor with a functional tensor is not allowed.",
16888 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
16889 } else {
16890 // case 2: arguments are not functional tensors, so we no-op and redispatch.
16891 at::AutoDispatchSkipFunctionalize guard;
16892 at::Tensor tmp_output = at::_ops::fractional_max_pool2d_backward_grad_input::call(grad_output_, self_, kernel_size, output_size, indices_, grad_input_);
16893 return grad_input;
16894 }
16895 } else {
16896 at::Tensor tmp_output;
16897 {
16898 at::AutoDispatchSkipFunctionalize guard;
16899 tmp_output = at::_ops::fractional_max_pool2d_backward::call(grad_output_, self_, kernel_size, output_size, indices_);
16900 }
16901 at::functionalization::impl::replace_(grad_input, tmp_output);
16902 at::functionalization::impl::commit_update(grad_input);
16903 at::functionalization::impl::sync(grad_input);
16904 return grad_input;
16905 }
16906 }
16907
16908 at::Tensor & fractional_max_pool3d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input) {
16909 if (false) {
16910 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
16911 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
16912 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16913 auto grad_output_meta = to_meta(grad_output);
16914 auto self_meta = to_meta(self);
16915 auto indices_meta = to_meta(indices);
16916 auto grad_input_meta = to_meta(grad_input);
16917 at::AutoDispatchSkipFunctionalize func_guard;
16918 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
16919 at::_ops::fractional_max_pool3d_backward_grad_input::call(grad_output_meta, self_meta, kernel_size, output_size, indices_meta, grad_input_meta);
16920 }
16921
16922 at::Tensor grad_output_;
16923 if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
16924 at::functionalization::impl::sync(grad_output);
16925 grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
16926 } else {
16927 grad_output_ = grad_output;
16928 }
16929
16930 at::Tensor self_;
16931 if (at::functionalization::impl::isFunctionalTensor(self)) {
16932 at::functionalization::impl::sync(self);
16933 self_ = at::functionalization::impl::from_functional_tensor(self);
16934 } else {
16935 self_ = self;
16936 }
16937
16938 at::Tensor indices_;
16939 if (at::functionalization::impl::isFunctionalTensor(indices)) {
16940 at::functionalization::impl::sync(indices);
16941 indices_ = at::functionalization::impl::from_functional_tensor(indices);
16942 } else {
16943 indices_ = indices;
16944 }
16945
16946 at::Tensor grad_input_;
16947 if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
16948 at::functionalization::impl::sync(grad_input);
16949 grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
16950 } else {
16951 grad_input_ = grad_input;
16952 }
16953 if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
16954 if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(indices))) {
16955 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
16956 TORCH_INTERNAL_ASSERT(false,
16957 "mutating a non-functional tensor with a functional tensor is not allowed.",
16958 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
16959 } else {
16960 // case 2: arguments are not functional tensors, so we no-op and redispatch.
16961 at::AutoDispatchSkipFunctionalize guard;
16962 at::Tensor tmp_output = at::_ops::fractional_max_pool3d_backward_grad_input::call(grad_output_, self_, kernel_size, output_size, indices_, grad_input_);
16963 return grad_input;
16964 }
16965 } else {
16966 at::Tensor tmp_output;
16967 {
16968 at::AutoDispatchSkipFunctionalize guard;
16969 tmp_output = at::_ops::fractional_max_pool3d_backward::call(grad_output_, self_, kernel_size, output_size, indices_);
16970 }
16971 at::functionalization::impl::replace_(grad_input, tmp_output);
16972 at::functionalization::impl::commit_update(grad_input);
16973 at::functionalization::impl::sync(grad_input);
16974 return grad_input;
16975 }
16976 }
16977
16978 ::std::tuple<at::Tensor &,at::Tensor &> max_pool3d_with_indices_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices) {
16979 if (false) {
16980 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
16981 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
16982 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
16983 auto self_meta = to_meta(self);
16984 auto out_meta = to_meta(out);
16985 auto indices_meta = to_meta(indices);
16986 at::AutoDispatchSkipFunctionalize func_guard;
16987 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
16988 at::_ops::max_pool3d_with_indices_out::call(self_meta, kernel_size, stride, padding, dilation, ceil_mode, out_meta, indices_meta);
16989 }
16990
16991 at::Tensor self_;
16992 if (at::functionalization::impl::isFunctionalTensor(self)) {
16993 at::functionalization::impl::sync(self);
16994 self_ = at::functionalization::impl::from_functional_tensor(self);
16995 } else {
16996 self_ = self;
16997 }
16998
16999 at::Tensor out_;
17000 if (at::functionalization::impl::isFunctionalTensor(out)) {
17001 at::functionalization::impl::sync(out);
17002 out_ = at::functionalization::impl::from_functional_tensor(out);
17003 } else {
17004 out_ = out;
17005 }
17006
17007 at::Tensor indices_;
17008 if (at::functionalization::impl::isFunctionalTensor(indices)) {
17009 at::functionalization::impl::sync(indices);
17010 indices_ = at::functionalization::impl::from_functional_tensor(indices);
17011 } else {
17012 indices_ = indices;
17013 }
17014 if (!(true && at::functionalization::impl::isFunctionalTensor(out) && at::functionalization::impl::isFunctionalTensor(indices))) {
17015 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
17016       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
17017 TORCH_INTERNAL_ASSERT(false,
17018 "mutating a non-functional tensor with a functional tensor is not allowed.",
17019 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
17020 } else {
17021 // case 2: arguments are not functional tensors, so we no-op and redispatch.
17022 at::AutoDispatchSkipFunctionalize guard;
17023 ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::max_pool3d_with_indices_out::call(self_, kernel_size, stride, padding, dilation, ceil_mode, out_, indices_);
17024       return ::std::tuple<at::Tensor &,at::Tensor &>(out, indices);
17025 }
17026 } else {
17027 ::std::tuple<at::Tensor,at::Tensor> tmp_output;
17028 {
17029 at::AutoDispatchSkipFunctionalize guard;
17030 tmp_output = at::_ops::max_pool3d_with_indices::call(self_, kernel_size, stride, padding, dilation, ceil_mode);
17031 }
17032 at::functionalization::impl::replace_(out, std::get<0>(tmp_output));
17033 at::functionalization::impl::commit_update(out);
17034 at::functionalization::impl::sync(out);
17035 at::functionalization::impl::replace_(indices, std::get<1>(tmp_output));
17036 at::functionalization::impl::commit_update(indices);
17037 at::functionalization::impl::sync(indices);
17038 return ::std::tuple<at::Tensor &,at::Tensor &>(out, indices);
17039 }
17040 }
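// Ops with more than one mutable output (here `out` and `indices`) follow the same pattern,
// with two differences: the functional path is only taken when *all* outputs are functional
// tensors, and the tuple result of the functional variant is committed back element-wise
// (replace_ / commit_update / sync on each output in order) before the original references
// are returned.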
17041
17042 at::Tensor & max_pool3d_with_indices_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input) {
17043 if (false) {
17044 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
17045 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
17046 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
17047 auto grad_output_meta = to_meta(grad_output);
17048 auto self_meta = to_meta(self);
17049 auto indices_meta = to_meta(indices);
17050 auto grad_input_meta = to_meta(grad_input);
17051 at::AutoDispatchSkipFunctionalize func_guard;
17052 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
17053 at::_ops::max_pool3d_with_indices_backward_grad_input::call(grad_output_meta, self_meta, kernel_size, stride, padding, dilation, ceil_mode, indices_meta, grad_input_meta);
17054 }
17055
17056 at::Tensor grad_output_;
17057 if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
17058 at::functionalization::impl::sync(grad_output);
17059 grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
17060 } else {
17061 grad_output_ = grad_output;
17062 }
17063
17064 at::Tensor self_;
17065 if (at::functionalization::impl::isFunctionalTensor(self)) {
17066 at::functionalization::impl::sync(self);
17067 self_ = at::functionalization::impl::from_functional_tensor(self);
17068 } else {
17069 self_ = self;
17070 }
17071
17072 at::Tensor indices_;
17073 if (at::functionalization::impl::isFunctionalTensor(indices)) {
17074 at::functionalization::impl::sync(indices);
17075 indices_ = at::functionalization::impl::from_functional_tensor(indices);
17076 } else {
17077 indices_ = indices;
17078 }
17079
17080 at::Tensor grad_input_;
17081 if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
17082 at::functionalization::impl::sync(grad_input);
17083 grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
17084 } else {
17085 grad_input_ = grad_input;
17086 }
17087 if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
17088 if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(indices))) {
17089       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
17090 TORCH_INTERNAL_ASSERT(false,
17091 "mutating a non-functional tensor with a functional tensor is not allowed.",
17092 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
17093 } else {
17094 // case 2: arguments are not functional tensors, so we no-op and redispatch.
17095 at::AutoDispatchSkipFunctionalize guard;
17096 at::Tensor tmp_output = at::_ops::max_pool3d_with_indices_backward_grad_input::call(grad_output_, self_, kernel_size, stride, padding, dilation, ceil_mode, indices_, grad_input_);
17097       return grad_input;
17098 }
17099 } else {
17100 at::Tensor tmp_output;
17101 {
17102 at::AutoDispatchSkipFunctionalize guard;
17103 tmp_output = at::_ops::max_pool3d_with_indices_backward::call(grad_output_, self_, kernel_size, stride, padding, dilation, ceil_mode, indices_);
17104 }
17105 at::functionalization::impl::replace_(grad_input, tmp_output);
17106 at::functionalization::impl::commit_update(grad_input);
17107 at::functionalization::impl::sync(grad_input);
17108 return grad_input;
17109 }
17110 }
17111
17112 at::Tensor & max_unpool2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::Tensor & out) {
17113 if (false) {
17114 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
17115 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
17116 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
17117 auto self_meta = to_meta(self);
17118 auto indices_meta = to_meta(indices);
17119 auto out_meta = to_meta(out);
17120 at::AutoDispatchSkipFunctionalize func_guard;
17121 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
17122 at::_ops::max_unpool2d_out::call(self_meta, indices_meta, output_size, out_meta);
17123 }
17124
17125 at::Tensor self_;
17126 if (at::functionalization::impl::isFunctionalTensor(self)) {
17127 at::functionalization::impl::sync(self);
17128 self_ = at::functionalization::impl::from_functional_tensor(self);
17129 } else {
17130 self_ = self;
17131 }
17132
17133 at::Tensor indices_;
17134 if (at::functionalization::impl::isFunctionalTensor(indices)) {
17135 at::functionalization::impl::sync(indices);
17136 indices_ = at::functionalization::impl::from_functional_tensor(indices);
17137 } else {
17138 indices_ = indices;
17139 }
17140
17141 at::Tensor out_;
17142 if (at::functionalization::impl::isFunctionalTensor(out)) {
17143 at::functionalization::impl::sync(out);
17144 out_ = at::functionalization::impl::from_functional_tensor(out);
17145 } else {
17146 out_ = out;
17147 }
17148 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
17149 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(indices))) {
17150       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
17151 TORCH_INTERNAL_ASSERT(false,
17152 "mutating a non-functional tensor with a functional tensor is not allowed.",
17153 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
17154 } else {
17155 // case 2: arguments are not functional tensors, so we no-op and redispatch.
17156 at::AutoDispatchSkipFunctionalize guard;
17157 at::Tensor tmp_output = at::_ops::max_unpool2d_out::call(self_, indices_, output_size, out_);
17158       return out;
17159 }
17160 } else {
17161 at::Tensor tmp_output;
17162 {
17163 at::AutoDispatchSkipFunctionalize guard;
17164 tmp_output = at::_ops::max_unpool2d::call(self_, indices_, output_size);
17165 }
17166 at::functionalization::impl::replace_(out, tmp_output);
17167 at::functionalization::impl::commit_update(out);
17168 at::functionalization::impl::sync(out);
17169 return out;
17170 }
17171 }
17172
17173 at::Tensor & reflection_pad2d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
17174 if (false) {
17175 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
17176 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
17177 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
17178 auto grad_output_meta = to_meta(grad_output);
17179 auto self_meta = to_meta(self);
17180 auto grad_input_meta = to_meta(grad_input);
17181 at::AutoDispatchSkipFunctionalize func_guard;
17182 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
17183 at::_ops::reflection_pad2d_backward_grad_input::call(grad_output_meta, self_meta, padding, grad_input_meta);
17184 }
17185
17186 at::Tensor grad_output_;
17187 if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
17188 at::functionalization::impl::sync(grad_output);
17189 grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
17190 } else {
17191 grad_output_ = grad_output;
17192 }
17193
17194 at::Tensor self_;
17195 if (at::functionalization::impl::isFunctionalTensor(self)) {
17196 at::functionalization::impl::sync(self);
17197 self_ = at::functionalization::impl::from_functional_tensor(self);
17198 } else {
17199 self_ = self;
17200 }
17201
17202 at::Tensor grad_input_;
17203 if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
17204 at::functionalization::impl::sync(grad_input);
17205 grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
17206 } else {
17207 grad_input_ = grad_input;
17208 }
17209 if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
17210 if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) {
17211       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
17212 TORCH_INTERNAL_ASSERT(false,
17213 "mutating a non-functional tensor with a functional tensor is not allowed.",
17214 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
17215 } else {
17216 // case 2: arguments are not functional tensors, so we no-op and redispatch.
17217 at::AutoDispatchSkipFunctionalize guard;
17218 at::Tensor tmp_output = at::_ops::reflection_pad2d_backward_grad_input::call(grad_output_, self_, padding, grad_input_);
17219       return grad_input;
17220 }
17221 } else {
17222 at::Tensor tmp_output;
17223 {
17224 at::AutoDispatchSkipFunctionalize guard;
17225 tmp_output = at::_ops::reflection_pad2d_backward::call(grad_output_, self_, padding);
17226 }
17227 at::functionalization::impl::replace_(grad_input, tmp_output);
17228 at::functionalization::impl::commit_update(grad_input);
17229 at::functionalization::impl::sync(grad_input);
17230 return grad_input;
17231 }
17232 }
17233
17234 at::Tensor & upsample_bilinear2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
17235 if (false) {
17236 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
17237 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
17238 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
17239 auto self_meta = to_meta(self);
17240 auto out_meta = to_meta(out);
17241 at::AutoDispatchSkipFunctionalize func_guard;
17242 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
17243 at::_ops::upsample_bilinear2d_out::call(self_meta, output_size, align_corners, scales_h, scales_w, out_meta);
17244 }
17245
17246 at::Tensor self_;
17247 if (at::functionalization::impl::isFunctionalTensor(self)) {
17248 at::functionalization::impl::sync(self);
17249 self_ = at::functionalization::impl::from_functional_tensor(self);
17250 } else {
17251 self_ = self;
17252 }
17253
17254 at::Tensor out_;
17255 if (at::functionalization::impl::isFunctionalTensor(out)) {
17256 at::functionalization::impl::sync(out);
17257 out_ = at::functionalization::impl::from_functional_tensor(out);
17258 } else {
17259 out_ = out;
17260 }
17261 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
17262 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
17263       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
17264 TORCH_INTERNAL_ASSERT(false,
17265 "mutating a non-functional tensor with a functional tensor is not allowed.",
17266 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
17267 } else {
17268 // case 2: arguments are not functional tensors, so we no-op and redispatch.
17269 at::AutoDispatchSkipFunctionalize guard;
17270 at::Tensor tmp_output = at::_ops::upsample_bilinear2d_out::call(self_, output_size, align_corners, scales_h, scales_w, out_);
17271       return out;
17272 }
17273 } else {
17274 at::Tensor tmp_output;
17275 {
17276 at::AutoDispatchSkipFunctionalize guard;
17277 tmp_output = at::_ops::upsample_bilinear2d::call(self_, output_size, align_corners, scales_h, scales_w);
17278 }
17279 at::functionalization::impl::replace_(out, tmp_output);
17280 at::functionalization::impl::commit_update(out);
17281 at::functionalization::impl::sync(out);
17282 return out;
17283 }
17284 }
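// Non-tensor arguments (IntArrayRef / SymIntArrayRef sizes, bool flags, c10::optional<double>
// scales, and so on) are forwarded by these kernels untouched; only Tensor arguments go
// through the sync/unwrap step, since functionalization is concerned solely with tensor
// mutation and aliasing.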
17285
17286 at::Tensor & upsample_bilinear2d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
17287 if (false) {
17288 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
17289 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
17290 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
17291 auto grad_output_meta = to_meta(grad_output);
17292 auto grad_input_meta = to_meta(grad_input);
17293 at::AutoDispatchSkipFunctionalize func_guard;
17294 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
17295 at::_ops::upsample_bilinear2d_backward_grad_input::call(grad_output_meta, output_size, input_size, align_corners, scales_h, scales_w, grad_input_meta);
17296 }
17297
17298 at::Tensor grad_output_;
17299 if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
17300 at::functionalization::impl::sync(grad_output);
17301 grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
17302 } else {
17303 grad_output_ = grad_output;
17304 }
17305
17306 at::Tensor grad_input_;
17307 if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
17308 at::functionalization::impl::sync(grad_input);
17309 grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
17310 } else {
17311 grad_input_ = grad_input;
17312 }
17313 if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
17314 if ((false || at::functionalization::impl::isFunctionalTensor(grad_output))) {
17315       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
17316 TORCH_INTERNAL_ASSERT(false,
17317 "mutating a non-functional tensor with a functional tensor is not allowed.",
17318 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
17319 } else {
17320 // case 2: arguments are not functional tensors, so we no-op and redispatch.
17321 at::AutoDispatchSkipFunctionalize guard;
17322 at::Tensor tmp_output = at::_ops::upsample_bilinear2d_backward_grad_input::call(grad_output_, output_size, input_size, align_corners, scales_h, scales_w, grad_input_);
17323       return grad_input;
17324 }
17325 } else {
17326 at::Tensor tmp_output;
17327 {
17328 at::AutoDispatchSkipFunctionalize guard;
17329 tmp_output = at::_ops::upsample_bilinear2d_backward::call(grad_output_, output_size, input_size, align_corners, scales_h, scales_w);
17330 }
17331 at::functionalization::impl::replace_(grad_input, tmp_output);
17332 at::functionalization::impl::commit_update(grad_input);
17333 at::functionalization::impl::sync(grad_input);
17334 return grad_input;
17335 }
17336 }
17337
17338 at::Tensor & _upsample_bilinear2d_aa_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
17339 if (false) {
17340 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
17341 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
17342 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
17343 auto self_meta = to_meta(self);
17344 auto out_meta = to_meta(out);
17345 at::AutoDispatchSkipFunctionalize func_guard;
17346 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
17347 at::_ops::_upsample_bilinear2d_aa_out::call(self_meta, output_size, align_corners, scales_h, scales_w, out_meta);
17348 }
17349
17350 at::Tensor self_;
17351 if (at::functionalization::impl::isFunctionalTensor(self)) {
17352 at::functionalization::impl::sync(self);
17353 self_ = at::functionalization::impl::from_functional_tensor(self);
17354 } else {
17355 self_ = self;
17356 }
17357
17358 at::Tensor out_;
17359 if (at::functionalization::impl::isFunctionalTensor(out)) {
17360 at::functionalization::impl::sync(out);
17361 out_ = at::functionalization::impl::from_functional_tensor(out);
17362 } else {
17363 out_ = out;
17364 }
17365 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
17366 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
17367       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
17368 TORCH_INTERNAL_ASSERT(false,
17369 "mutating a non-functional tensor with a functional tensor is not allowed.",
17370 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
17371 } else {
17372 // case 2: arguments are not functional tensors, so we no-op and redispatch.
17373 at::AutoDispatchSkipFunctionalize guard;
17374 at::Tensor tmp_output = at::_ops::_upsample_bilinear2d_aa_out::call(self_, output_size, align_corners, scales_h, scales_w, out_);
17375       return out;
17376 }
17377 } else {
17378 at::Tensor tmp_output;
17379 {
17380 at::AutoDispatchSkipFunctionalize guard;
17381 tmp_output = at::_ops::_upsample_bilinear2d_aa::call(self_, output_size, align_corners, scales_h, scales_w);
17382 }
17383 at::functionalization::impl::replace_(out, tmp_output);
17384 at::functionalization::impl::commit_update(out);
17385 at::functionalization::impl::sync(out);
17386 return out;
17387 }
17388 }
17389
17390 at::Tensor & upsample_trilinear3d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
17391 if (false) {
17392 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
17393 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
17394 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
17395 auto grad_output_meta = to_meta(grad_output);
17396 auto grad_input_meta = to_meta(grad_input);
17397 at::AutoDispatchSkipFunctionalize func_guard;
17398 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
17399 at::_ops::upsample_trilinear3d_backward_grad_input::call(grad_output_meta, output_size, input_size, align_corners, scales_d, scales_h, scales_w, grad_input_meta);
17400 }
17401
17402 at::Tensor grad_output_;
17403 if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
17404 at::functionalization::impl::sync(grad_output);
17405 grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
17406 } else {
17407 grad_output_ = grad_output;
17408 }
17409
17410 at::Tensor grad_input_;
17411 if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
17412 at::functionalization::impl::sync(grad_input);
17413 grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
17414 } else {
17415 grad_input_ = grad_input;
17416 }
17417 if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
17418 if ((false || at::functionalization::impl::isFunctionalTensor(grad_output))) {
17419       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
17420 TORCH_INTERNAL_ASSERT(false,
17421 "mutating a non-functional tensor with a functional tensor is not allowed.",
17422 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
17423 } else {
17424 // case 2: arguments are not functional tensors, so we no-op and redispatch.
17425 at::AutoDispatchSkipFunctionalize guard;
17426 at::Tensor tmp_output = at::_ops::upsample_trilinear3d_backward_grad_input::call(grad_output_, output_size, input_size, align_corners, scales_d, scales_h, scales_w, grad_input_);
17427       return grad_input;
17428 }
17429 } else {
17430 at::Tensor tmp_output;
17431 {
17432 at::AutoDispatchSkipFunctionalize guard;
17433 tmp_output = at::_ops::upsample_trilinear3d_backward::call(grad_output_, output_size, input_size, align_corners, scales_d, scales_h, scales_w);
17434 }
17435 at::functionalization::impl::replace_(grad_input, tmp_output);
17436 at::functionalization::impl::commit_update(grad_input);
17437 at::functionalization::impl::sync(grad_input);
17438 return grad_input;
17439 }
17440 }
17441
17442 at::Tensor & _upsample_nearest_exact3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
17443 if (false) {
17444 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
17445 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
17446 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
17447 auto self_meta = to_meta(self);
17448 auto out_meta = to_meta(out);
17449 at::AutoDispatchSkipFunctionalize func_guard;
17450 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
17451 at::_ops::_upsample_nearest_exact3d_out::call(self_meta, output_size, scales_d, scales_h, scales_w, out_meta);
17452 }
17453
17454 at::Tensor self_;
17455 if (at::functionalization::impl::isFunctionalTensor(self)) {
17456 at::functionalization::impl::sync(self);
17457 self_ = at::functionalization::impl::from_functional_tensor(self);
17458 } else {
17459 self_ = self;
17460 }
17461
17462 at::Tensor out_;
17463 if (at::functionalization::impl::isFunctionalTensor(out)) {
17464 at::functionalization::impl::sync(out);
17465 out_ = at::functionalization::impl::from_functional_tensor(out);
17466 } else {
17467 out_ = out;
17468 }
17469 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
17470 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
17471       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
17472 TORCH_INTERNAL_ASSERT(false,
17473 "mutating a non-functional tensor with a functional tensor is not allowed.",
17474 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
17475 } else {
17476 // case 2: arguments are not functional tensors, so we no-op and redispatch.
17477 at::AutoDispatchSkipFunctionalize guard;
17478 at::Tensor tmp_output = at::_ops::_upsample_nearest_exact3d_out::call(self_, output_size, scales_d, scales_h, scales_w, out_);
17479       return out;
17480 }
17481 } else {
17482 at::Tensor tmp_output;
17483 {
17484 at::AutoDispatchSkipFunctionalize guard;
17485 tmp_output = at::_ops::_upsample_nearest_exact3d::call(self_, output_size, scales_d, scales_h, scales_w);
17486 }
17487 at::functionalization::impl::replace_(out, tmp_output);
17488 at::functionalization::impl::commit_update(out);
17489 at::functionalization::impl::sync(out);
17490 return out;
17491 }
17492 }
17493
17494 at::Tensor & upsample_nearest3d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
17495 if (false) {
17496 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
17497 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
17498 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
17499 auto grad_output_meta = to_meta(grad_output);
17500 auto grad_input_meta = to_meta(grad_input);
17501 at::AutoDispatchSkipFunctionalize func_guard;
17502 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
17503 at::_ops::upsample_nearest3d_backward_grad_input::call(grad_output_meta, output_size, input_size, scales_d, scales_h, scales_w, grad_input_meta);
17504 }
17505
17506 at::Tensor grad_output_;
17507 if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
17508 at::functionalization::impl::sync(grad_output);
17509 grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
17510 } else {
17511 grad_output_ = grad_output;
17512 }
17513
17514 at::Tensor grad_input_;
17515 if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
17516 at::functionalization::impl::sync(grad_input);
17517 grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
17518 } else {
17519 grad_input_ = grad_input;
17520 }
17521 if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
17522 if ((false || at::functionalization::impl::isFunctionalTensor(grad_output))) {
17523       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
17524 TORCH_INTERNAL_ASSERT(false,
17525 "mutating a non-functional tensor with a functional tensor is not allowed.",
17526 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
17527 } else {
17528 // case 2: arguments are not functional tensors, so we no-op and redispatch.
17529 at::AutoDispatchSkipFunctionalize guard;
17530 at::Tensor tmp_output = at::_ops::upsample_nearest3d_backward_grad_input::call(grad_output_, output_size, input_size, scales_d, scales_h, scales_w, grad_input_);
17531       return grad_input;
17532 }
17533 } else {
17534 at::Tensor tmp_output;
17535 {
17536 at::AutoDispatchSkipFunctionalize guard;
17537 tmp_output = at::_ops::upsample_nearest3d_backward::call(grad_output_, output_size, input_size, scales_d, scales_h, scales_w);
17538 }
17539 at::functionalization::impl::replace_(grad_input, tmp_output);
17540 at::functionalization::impl::commit_update(grad_input);
17541 at::functionalization::impl::sync(grad_input);
17542 return grad_input;
17543 }
17544 }
17545
17546 at::Tensor & logit_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps, at::Tensor & grad_input) {
17547 if (false) {
17548 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
17549 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
17550 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
17551 auto grad_output_meta = to_meta(grad_output);
17552 auto self_meta = to_meta(self);
17553 auto grad_input_meta = to_meta(grad_input);
17554 at::AutoDispatchSkipFunctionalize func_guard;
17555 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
17556 at::_ops::logit_backward_grad_input::call(grad_output_meta, self_meta, eps, grad_input_meta);
17557 }
17558
17559 at::Tensor grad_output_;
17560 if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
17561 at::functionalization::impl::sync(grad_output);
17562 grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
17563 } else {
17564 grad_output_ = grad_output;
17565 }
17566
17567 at::Tensor self_;
17568 if (at::functionalization::impl::isFunctionalTensor(self)) {
17569 at::functionalization::impl::sync(self);
17570 self_ = at::functionalization::impl::from_functional_tensor(self);
17571 } else {
17572 self_ = self;
17573 }
17574
17575 at::Tensor grad_input_;
17576 if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
17577 at::functionalization::impl::sync(grad_input);
17578 grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
17579 } else {
17580 grad_input_ = grad_input;
17581 }
17582 if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
17583 if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) {
17584       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
17585 TORCH_INTERNAL_ASSERT(false,
17586 "mutating a non-functional tensor with a functional tensor is not allowed.",
17587 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
17588 } else {
17589 // case 2: arguments are not functional tensors, so we no-op and redispatch.
17590 at::AutoDispatchSkipFunctionalize guard;
17591 at::Tensor tmp_output = at::_ops::logit_backward_grad_input::call(grad_output_, self_, eps, grad_input_);
17592       return grad_input;
17593 }
17594 } else {
17595 at::Tensor tmp_output;
17596 {
17597 at::AutoDispatchSkipFunctionalize guard;
17598 tmp_output = at::_ops::logit_backward::call(grad_output_, self_, eps);
17599 }
17600 at::functionalization::impl::replace_(grad_input, tmp_output);
17601 at::functionalization::impl::commit_update(grad_input);
17602 at::functionalization::impl::sync(grad_input);
17603 return grad_input;
17604 }
17605 }
17606
17607 at::Tensor & thnn_conv2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) {
17608 if (false) {
17609 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
17610 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
17611 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
17612 auto self_meta = to_meta(self);
17613 auto weight_meta = to_meta(weight);
17614 auto bias_meta = to_meta(bias);
17615 auto out_meta = to_meta(out);
17616 at::AutoDispatchSkipFunctionalize func_guard;
17617 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
17618 at::_ops::thnn_conv2d_out::call(self_meta, weight_meta, kernel_size, bias_meta, stride, padding, out_meta);
17619 }
17620
17621 at::Tensor self_;
17622 if (at::functionalization::impl::isFunctionalTensor(self)) {
17623 at::functionalization::impl::sync(self);
17624 self_ = at::functionalization::impl::from_functional_tensor(self);
17625 } else {
17626 self_ = self;
17627 }
17628
17629 at::Tensor weight_;
17630 if (at::functionalization::impl::isFunctionalTensor(weight)) {
17631 at::functionalization::impl::sync(weight);
17632 weight_ = at::functionalization::impl::from_functional_tensor(weight);
17633 } else {
17634 weight_ = weight;
17635 }
17636
17637 c10::optional<at::Tensor> bias_;
17638 if (at::functionalization::impl::isFunctionalTensor(bias)) {
17639 at::functionalization::impl::sync(bias);
17640 bias_ = at::functionalization::impl::from_functional_tensor(bias);
17641 } else {
17642 bias_ = bias;
17643 }
17644
17645 at::Tensor out_;
17646 if (at::functionalization::impl::isFunctionalTensor(out)) {
17647 at::functionalization::impl::sync(out);
17648 out_ = at::functionalization::impl::from_functional_tensor(out);
17649 } else {
17650 out_ = out;
17651 }
17652 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
17653 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
17654       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
17655 TORCH_INTERNAL_ASSERT(false,
17656 "mutating a non-functional tensor with a functional tensor is not allowed.",
17657 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
17658 } else {
17659 // case 2: arguments are not functional tensors, so we no-op and redispatch.
17660 at::AutoDispatchSkipFunctionalize guard;
17661 at::Tensor tmp_output = at::_ops::thnn_conv2d_out::call(self_, weight_, kernel_size, bias_, stride, padding, out_);
17662       return out;
17663 }
17664 } else {
17665 at::Tensor tmp_output;
17666 {
17667 at::AutoDispatchSkipFunctionalize guard;
17668 tmp_output = at::_ops::thnn_conv2d::call(self_, weight_, kernel_size, bias_, stride, padding);
17669 }
17670 at::functionalization::impl::replace_(out, tmp_output);
17671 at::functionalization::impl::commit_update(out);
17672 at::functionalization::impl::sync(out);
17673 return out;
17674 }
17675 }
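// Optional tensor arguments such as `bias` are unwrapped into a c10::optional<at::Tensor>
// using the same isFunctionalTensor / sync / from_functional_tensor sequence, and they count
// toward the "any functional input" check of case 1 exactly like required tensor arguments.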
17676
17677 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_out_output_mask_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
17678 if (false) {
17679 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
17680 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
17681 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
17682 auto grad_output_meta = to_meta(grad_output);
17683 auto self_meta = to_meta(self);
17684 auto weight_meta = to_meta(weight);
17685 auto out0_meta = to_meta(out0);
17686 auto out1_meta = to_meta(out1);
17687 auto out2_meta = to_meta(out2);
17688 at::AutoDispatchSkipFunctionalize func_guard;
17689 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
17690 at::_ops::_slow_conv2d_backward_output_mask_out::call(grad_output_meta, self_meta, weight_meta, kernel_size, stride, padding, output_mask, out0_meta, out1_meta, out2_meta);
17691 }
17692
17693 at::Tensor grad_output_;
17694 if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
17695 at::functionalization::impl::sync(grad_output);
17696 grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
17697 } else {
17698 grad_output_ = grad_output;
17699 }
17700
17701 at::Tensor self_;
17702 if (at::functionalization::impl::isFunctionalTensor(self)) {
17703 at::functionalization::impl::sync(self);
17704 self_ = at::functionalization::impl::from_functional_tensor(self);
17705 } else {
17706 self_ = self;
17707 }
17708
17709 at::Tensor weight_;
17710 if (at::functionalization::impl::isFunctionalTensor(weight)) {
17711 at::functionalization::impl::sync(weight);
17712 weight_ = at::functionalization::impl::from_functional_tensor(weight);
17713 } else {
17714 weight_ = weight;
17715 }
17716
17717 at::Tensor out0_;
17718 if (at::functionalization::impl::isFunctionalTensor(out0)) {
17719 at::functionalization::impl::sync(out0);
17720 out0_ = at::functionalization::impl::from_functional_tensor(out0);
17721 } else {
17722 out0_ = out0;
17723 }
17724
17725 at::Tensor out1_;
17726 if (at::functionalization::impl::isFunctionalTensor(out1)) {
17727 at::functionalization::impl::sync(out1);
17728 out1_ = at::functionalization::impl::from_functional_tensor(out1);
17729 } else {
17730 out1_ = out1;
17731 }
17732
17733 at::Tensor out2_;
17734 if (at::functionalization::impl::isFunctionalTensor(out2)) {
17735 at::functionalization::impl::sync(out2);
17736 out2_ = at::functionalization::impl::from_functional_tensor(out2);
17737 } else {
17738 out2_ = out2;
17739 }
17740 if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
17741 if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight))) {
17742       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
17743 TORCH_INTERNAL_ASSERT(false,
17744 "mutating a non-functional tensor with a functional tensor is not allowed.",
17745 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
17746 } else {
17747 // case 2: arguments are not functional tensors, so we no-op and redispatch.
17748 at::AutoDispatchSkipFunctionalize guard;
17749 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_slow_conv2d_backward_output_mask_out::call(grad_output_, self_, weight_, kernel_size, stride, padding, output_mask, out0_, out1_, out2_);
17750       return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
17751 }
17752 } else {
17753 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
17754 {
17755 at::AutoDispatchSkipFunctionalize guard;
17756 tmp_output = at::_ops::_slow_conv2d_backward_output_mask::call(grad_output_, self_, weight_, kernel_size, stride, padding, output_mask);
17757 }
17758 at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
17759 at::functionalization::impl::commit_update(out0);
17760 at::functionalization::impl::sync(out0);
17761 at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
17762 at::functionalization::impl::commit_update(out1);
17763 at::functionalization::impl::sync(out1);
17764 at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
17765 at::functionalization::impl::commit_update(out2);
17766 at::functionalization::impl::sync(out2);
17767 return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
17768 }
17769 }
17770
17771 at::Tensor & slow_conv3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & out) {
17772 if (false) {
17773 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
17774 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
17775 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
17776 auto self_meta = to_meta(self);
17777 auto weight_meta = to_meta(weight);
17778 auto bias_meta = to_meta(bias);
17779 auto out_meta = to_meta(out);
17780 at::AutoDispatchSkipFunctionalize func_guard;
17781 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
17782 at::_ops::slow_conv3d_out::call(self_meta, weight_meta, kernel_size, bias_meta, stride, padding, out_meta);
17783 }
17784
17785 at::Tensor self_;
17786 if (at::functionalization::impl::isFunctionalTensor(self)) {
17787 at::functionalization::impl::sync(self);
17788 self_ = at::functionalization::impl::from_functional_tensor(self);
17789 } else {
17790 self_ = self;
17791 }
17792
17793 at::Tensor weight_;
17794 if (at::functionalization::impl::isFunctionalTensor(weight)) {
17795 at::functionalization::impl::sync(weight);
17796 weight_ = at::functionalization::impl::from_functional_tensor(weight);
17797 } else {
17798 weight_ = weight;
17799 }
17800
17801 c10::optional<at::Tensor> bias_;
17802 if (at::functionalization::impl::isFunctionalTensor(bias)) {
17803 at::functionalization::impl::sync(bias);
17804 bias_ = at::functionalization::impl::from_functional_tensor(bias);
17805 } else {
17806 bias_ = bias;
17807 }
17808
17809 at::Tensor out_;
17810 if (at::functionalization::impl::isFunctionalTensor(out)) {
17811 at::functionalization::impl::sync(out);
17812 out_ = at::functionalization::impl::from_functional_tensor(out);
17813 } else {
17814 out_ = out;
17815 }
17816 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
17817 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
17818       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
17819 TORCH_INTERNAL_ASSERT(false,
17820 "mutating a non-functional tensor with a functional tensor is not allowed.",
17821 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
17822 } else {
17823 // case 2: arguments are not functional tensors, so we no-op and redispatch.
17824 at::AutoDispatchSkipFunctionalize guard;
17825 at::Tensor tmp_output = at::_ops::slow_conv3d_out::call(self_, weight_, kernel_size, bias_, stride, padding, out_);
17826       return out;
17827 }
17828 } else {
17829 at::Tensor tmp_output;
17830 {
17831 at::AutoDispatchSkipFunctionalize guard;
17832 tmp_output = at::_ops::slow_conv3d::call(self_, weight_, kernel_size, bias_, stride, padding);
17833 }
17834 at::functionalization::impl::replace_(out, tmp_output);
17835 at::functionalization::impl::commit_update(out);
17836 at::functionalization::impl::sync(out);
17837 return out;
17838 }
17839 }
17840
17841 at::Tensor & slow_conv3d_forward_out_output(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & output) {
17842 if (false) {
17843 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
17844 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
17845 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
17846 auto self_meta = to_meta(self);
17847 auto weight_meta = to_meta(weight);
17848 auto bias_meta = to_meta(bias);
17849 auto output_meta = to_meta(output);
17850 at::AutoDispatchSkipFunctionalize func_guard;
17851 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
17852 at::_ops::slow_conv3d_forward_output::call(self_meta, weight_meta, kernel_size, bias_meta, stride, padding, output_meta);
17853 }
17854
17855 at::Tensor self_;
17856 if (at::functionalization::impl::isFunctionalTensor(self)) {
17857 at::functionalization::impl::sync(self);
17858 self_ = at::functionalization::impl::from_functional_tensor(self);
17859 } else {
17860 self_ = self;
17861 }
17862
17863 at::Tensor weight_;
17864 if (at::functionalization::impl::isFunctionalTensor(weight)) {
17865 at::functionalization::impl::sync(weight);
17866 weight_ = at::functionalization::impl::from_functional_tensor(weight);
17867 } else {
17868 weight_ = weight;
17869 }
17870
17871 c10::optional<at::Tensor> bias_;
17872 if (at::functionalization::impl::isFunctionalTensor(bias)) {
17873 at::functionalization::impl::sync(bias);
17874 bias_ = at::functionalization::impl::from_functional_tensor(bias);
17875 } else {
17876 bias_ = bias;
17877 }
17878
17879 at::Tensor output_;
17880 if (at::functionalization::impl::isFunctionalTensor(output)) {
17881 at::functionalization::impl::sync(output);
17882 output_ = at::functionalization::impl::from_functional_tensor(output);
17883 } else {
17884 output_ = output;
17885 }
17886 if (!(true && at::functionalization::impl::isFunctionalTensor(output))) {
17887 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
17888       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
17889 TORCH_INTERNAL_ASSERT(false,
17890 "mutating a non-functional tensor with a functional tensor is not allowed.",
17891 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
17892 } else {
17893 // case 2: arguments are not functional tensors, so we no-op and redispatch.
17894 at::AutoDispatchSkipFunctionalize guard;
17895 at::Tensor tmp_output = at::_ops::slow_conv3d_forward_output::call(self_, weight_, kernel_size, bias_, stride, padding, output_);
17896       return output;
17897 }
17898 } else {
17899 at::Tensor tmp_output;
17900 {
17901 at::AutoDispatchSkipFunctionalize guard;
17902 tmp_output = at::_ops::slow_conv3d_forward::call(self_, weight_, kernel_size, bias_, stride, padding);
17903 }
17904 at::functionalization::impl::replace_(output, tmp_output);
17905 at::functionalization::impl::commit_update(output);
17906 at::functionalization::impl::sync(output);
17907 return output;
17908 }
17909 }
17910
17911 at::Tensor & slow_conv_dilated3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) {
17912 if (false) {
17913 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
17914 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
17915 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
17916 auto self_meta = to_meta(self);
17917 auto weight_meta = to_meta(weight);
17918 auto bias_meta = to_meta(bias);
17919 auto out_meta = to_meta(out);
17920 at::AutoDispatchSkipFunctionalize func_guard;
17921 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
17922 at::_ops::slow_conv_dilated3d_out::call(self_meta, weight_meta, kernel_size, bias_meta, stride, padding, dilation, out_meta);
17923 }
17924
17925 at::Tensor self_;
17926 if (at::functionalization::impl::isFunctionalTensor(self)) {
17927 at::functionalization::impl::sync(self);
17928 self_ = at::functionalization::impl::from_functional_tensor(self);
17929 } else {
17930 self_ = self;
17931 }
17932
17933 at::Tensor weight_;
17934 if (at::functionalization::impl::isFunctionalTensor(weight)) {
17935 at::functionalization::impl::sync(weight);
17936 weight_ = at::functionalization::impl::from_functional_tensor(weight);
17937 } else {
17938 weight_ = weight;
17939 }
17940
17941 c10::optional<at::Tensor> bias_;
17942 if (at::functionalization::impl::isFunctionalTensor(bias)) {
17943 at::functionalization::impl::sync(bias);
17944 bias_ = at::functionalization::impl::from_functional_tensor(bias);
17945 } else {
17946 bias_ = bias;
17947 }
17948
17949 at::Tensor out_;
17950 if (at::functionalization::impl::isFunctionalTensor(out)) {
17951 at::functionalization::impl::sync(out);
17952 out_ = at::functionalization::impl::from_functional_tensor(out);
17953 } else {
17954 out_ = out;
17955 }
17956 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
17957 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
17958       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
17959 TORCH_INTERNAL_ASSERT(false,
17960 "mutating a non-functional tensor with a functional tensor is not allowed.",
17961 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
17962 } else {
17963 // case 2: arguments are not functional tensors, so we no-op and redispatch.
17964 at::AutoDispatchSkipFunctionalize guard;
17965 at::Tensor tmp_output = at::_ops::slow_conv_dilated3d_out::call(self_, weight_, kernel_size, bias_, stride, padding, dilation, out_);
17966       return out;
17967 }
17968 } else {
17969 at::Tensor tmp_output;
17970 {
17971 at::AutoDispatchSkipFunctionalize guard;
17972 tmp_output = at::_ops::slow_conv_dilated3d::call(self_, weight_, kernel_size, bias_, stride, padding, dilation);
17973 }
17974 at::functionalization::impl::replace_(out, tmp_output);
17975 at::functionalization::impl::commit_update(out);
17976 at::functionalization::impl::sync(out);
17977 return out;
17978 }
17979 }
17980
17981 at::Tensor & special_log_ndtr_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
17982 if (false) {
17983 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
17984 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
17985 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
17986 auto self_meta = to_meta(self);
17987 auto out_meta = to_meta(out);
17988 at::AutoDispatchSkipFunctionalize func_guard;
17989 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
17990 at::_ops::special_log_ndtr_out::call(self_meta, out_meta);
17991 }
17992
17993 at::Tensor self_;
17994 if (at::functionalization::impl::isFunctionalTensor(self)) {
17995 at::functionalization::impl::sync(self);
17996 self_ = at::functionalization::impl::from_functional_tensor(self);
17997 } else {
17998 self_ = self;
17999 }
18000
18001 at::Tensor out_;
18002 if (at::functionalization::impl::isFunctionalTensor(out)) {
18003 at::functionalization::impl::sync(out);
18004 out_ = at::functionalization::impl::from_functional_tensor(out);
18005 } else {
18006 out_ = out;
18007 }
18008 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
18009 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
18010       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18011 TORCH_INTERNAL_ASSERT(false,
18012 "mutating a non-functional tensor with a functional tensor is not allowed.",
18013 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18014 } else {
18015 // case 2: arguments are not functional tensors, so we no-op and redispatch.
18016 at::AutoDispatchSkipFunctionalize guard;
18017 at::Tensor tmp_output = at::_ops::special_log_ndtr_out::call(self_, out_);
18018       return out;
18019 }
18020 } else {
18021 at::Tensor tmp_output;
18022 {
18023 at::AutoDispatchSkipFunctionalize guard;
18024 tmp_output = at::_ops::special_log_ndtr::call(self_);
18025 }
18026 at::functionalization::impl::replace_(out, tmp_output);
18027 at::functionalization::impl::commit_update(out);
18028 at::functionalization::impl::sync(out);
18029 return out;
18030 }
18031 }
18032
18033 at::Tensor & special_exp2_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
18034 if (false) {
18035 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
18036 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
18037 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18038 auto self_meta = to_meta(self);
18039 auto out_meta = to_meta(out);
18040 at::AutoDispatchSkipFunctionalize func_guard;
18041 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
18042 at::_ops::special_exp2_out::call(self_meta, out_meta);
18043 }
18044
18045 at::Tensor self_;
18046 if (at::functionalization::impl::isFunctionalTensor(self)) {
18047 at::functionalization::impl::sync(self);
18048 self_ = at::functionalization::impl::from_functional_tensor(self);
18049 } else {
18050 self_ = self;
18051 }
18052
18053 at::Tensor out_;
18054 if (at::functionalization::impl::isFunctionalTensor(out)) {
18055 at::functionalization::impl::sync(out);
18056 out_ = at::functionalization::impl::from_functional_tensor(out);
18057 } else {
18058 out_ = out;
18059 }
18060 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
18061 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
18062       // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18063 TORCH_INTERNAL_ASSERT(false,
18064 "mutating a non-functional tensor with a functional tensor is not allowed.",
18065 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18066 } else {
18067 // case 2: arguments are not functional tensors, so we no-op and redispatch.
18068 at::AutoDispatchSkipFunctionalize guard;
18069 at::Tensor tmp_output = at::_ops::special_exp2_out::call(self_, out_);
18070       return out;
18071 }
18072 } else {
18073 at::Tensor tmp_output;
18074 {
18075 at::AutoDispatchSkipFunctionalize guard;
18076 tmp_output = at::_ops::special_exp2::call(self_);
18077 }
18078 at::functionalization::impl::replace_(out, tmp_output);
18079 at::functionalization::impl::commit_update(out);
18080 at::functionalization::impl::sync(out);
18081 return out;
18082 }
18083 }
18084
18085 at::Tensor & special_digamma_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
18086 if (false) {
18087 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
18088 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
18089 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18090 auto self_meta = to_meta(self);
18091 auto out_meta = to_meta(out);
18092 at::AutoDispatchSkipFunctionalize func_guard;
18093 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
18094 at::_ops::special_digamma_out::call(self_meta, out_meta);
18095 }
18096
18097 at::Tensor self_;
18098 if (at::functionalization::impl::isFunctionalTensor(self)) {
18099 at::functionalization::impl::sync(self);
18100 self_ = at::functionalization::impl::from_functional_tensor(self);
18101 } else {
18102 self_ = self;
18103 }
18104
18105 at::Tensor out_;
18106 if (at::functionalization::impl::isFunctionalTensor(out)) {
18107 at::functionalization::impl::sync(out);
18108 out_ = at::functionalization::impl::from_functional_tensor(out);
18109 } else {
18110 out_ = out;
18111 }
18112 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
18113 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
18114      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18115 TORCH_INTERNAL_ASSERT(false,
18116 "mutating a non-functional tensor with a functional tensor is not allowed.",
18117 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18118 } else {
18119 // case 2: arguments are not functional tensors, so we no-op and redispatch.
18120 at::AutoDispatchSkipFunctionalize guard;
18121 at::Tensor tmp_output = at::_ops::special_digamma_out::call(self_, out_);
18122      return out;
18123 }
18124 } else {
18125 at::Tensor tmp_output;
18126 {
18127 at::AutoDispatchSkipFunctionalize guard;
18128 tmp_output = at::_ops::special_digamma::call(self_);
18129 }
18130 at::functionalization::impl::replace_(out, tmp_output);
18131 at::functionalization::impl::commit_update(out);
18132 at::functionalization::impl::sync(out);
18133 return out;
18134 }
18135 }
18136
18137 at::Tensor & special_gammaln_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
18138 if (false) {
18139 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
18140 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
18141    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18142 auto self_meta = to_meta(self);
18143 auto out_meta = to_meta(out);
18144 at::AutoDispatchSkipFunctionalize func_guard;
18145 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
18146 at::_ops::special_gammaln_out::call(self_meta, out_meta);
18147 }
18148
18149 at::Tensor self_;
18150 if (at::functionalization::impl::isFunctionalTensor(self)) {
18151 at::functionalization::impl::sync(self);
18152 self_ = at::functionalization::impl::from_functional_tensor(self);
18153 } else {
18154 self_ = self;
18155 }
18156
18157 at::Tensor out_;
18158 if (at::functionalization::impl::isFunctionalTensor(out)) {
18159 at::functionalization::impl::sync(out);
18160 out_ = at::functionalization::impl::from_functional_tensor(out);
18161 } else {
18162 out_ = out;
18163 }
18164 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
18165 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
18166      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18167 TORCH_INTERNAL_ASSERT(false,
18168 "mutating a non-functional tensor with a functional tensor is not allowed.",
18169 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18170 } else {
18171 // case 2: arguments are not functional tensors, so we no-op and redispatch.
18172 at::AutoDispatchSkipFunctionalize guard;
18173 at::Tensor tmp_output = at::_ops::special_gammaln_out::call(self_, out_);
18174      return out;
18175 }
18176 } else {
18177 at::Tensor tmp_output;
18178 {
18179 at::AutoDispatchSkipFunctionalize guard;
18180 tmp_output = at::_ops::special_gammaln::call(self_);
18181 }
18182 at::functionalization::impl::replace_(out, tmp_output);
18183 at::functionalization::impl::commit_update(out);
18184 at::functionalization::impl::sync(out);
18185 return out;
18186 }
18187 }
18188
18189 at::Tensor & special_erfcx_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
18190 if (false) {
18191 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
18192 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
18193    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18194 auto self_meta = to_meta(self);
18195 auto out_meta = to_meta(out);
18196 at::AutoDispatchSkipFunctionalize func_guard;
18197 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
18198 at::_ops::special_erfcx_out::call(self_meta, out_meta);
18199 }
18200
18201 at::Tensor self_;
18202 if (at::functionalization::impl::isFunctionalTensor(self)) {
18203 at::functionalization::impl::sync(self);
18204 self_ = at::functionalization::impl::from_functional_tensor(self);
18205 } else {
18206 self_ = self;
18207 }
18208
18209 at::Tensor out_;
18210 if (at::functionalization::impl::isFunctionalTensor(out)) {
18211 at::functionalization::impl::sync(out);
18212 out_ = at::functionalization::impl::from_functional_tensor(out);
18213 } else {
18214 out_ = out;
18215 }
18216 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
18217 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
18218      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18219 TORCH_INTERNAL_ASSERT(false,
18220 "mutating a non-functional tensor with a functional tensor is not allowed.",
18221 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18222 } else {
18223 // case 2: arguments are not functional tensors, so we no-op and redispatch.
18224 at::AutoDispatchSkipFunctionalize guard;
18225 at::Tensor tmp_output = at::_ops::special_erfcx_out::call(self_, out_);
18226      return out;
18227 }
18228 } else {
18229 at::Tensor tmp_output;
18230 {
18231 at::AutoDispatchSkipFunctionalize guard;
18232 tmp_output = at::_ops::special_erfcx::call(self_);
18233 }
18234 at::functionalization::impl::replace_(out, tmp_output);
18235 at::functionalization::impl::commit_update(out);
18236 at::functionalization::impl::sync(out);
18237 return out;
18238 }
18239 }
18240
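// Binary kernels such as special_xlog1py unwrap every tensor argument (self, other,
// out). If `out` is not functional but either tensor input is, that still triggers
// the case-1 error described above.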
18241 at::Tensor & special_xlog1py_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
18242 if (false) {
18243 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
18244 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
18245    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18246 auto self_meta = to_meta(self);
18247 auto other_meta = to_meta(other);
18248 auto out_meta = to_meta(out);
18249 at::AutoDispatchSkipFunctionalize func_guard;
18250 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
18251 at::_ops::special_xlog1py_out::call(self_meta, other_meta, out_meta);
18252 }
18253
18254 at::Tensor self_;
18255 if (at::functionalization::impl::isFunctionalTensor(self)) {
18256 at::functionalization::impl::sync(self);
18257 self_ = at::functionalization::impl::from_functional_tensor(self);
18258 } else {
18259 self_ = self;
18260 }
18261
18262 at::Tensor other_;
18263 if (at::functionalization::impl::isFunctionalTensor(other)) {
18264 at::functionalization::impl::sync(other);
18265 other_ = at::functionalization::impl::from_functional_tensor(other);
18266 } else {
18267 other_ = other;
18268 }
18269
18270 at::Tensor out_;
18271 if (at::functionalization::impl::isFunctionalTensor(out)) {
18272 at::functionalization::impl::sync(out);
18273 out_ = at::functionalization::impl::from_functional_tensor(out);
18274 } else {
18275 out_ = out;
18276 }
18277 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
18278 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
18279      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18280 TORCH_INTERNAL_ASSERT(false,
18281 "mutating a non-functional tensor with a functional tensor is not allowed.",
18282 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18283 } else {
18284 // case 2: arguments are not functional tensors, so we no-op and redispatch.
18285 at::AutoDispatchSkipFunctionalize guard;
18286 at::Tensor tmp_output = at::_ops::special_xlog1py_out::call(self_, other_, out_);
18287      return out;
18288 }
18289 } else {
18290 at::Tensor tmp_output;
18291 {
18292 at::AutoDispatchSkipFunctionalize guard;
18293 tmp_output = at::_ops::special_xlog1py::call(self_, other_);
18294 }
18295 at::functionalization::impl::replace_(out, tmp_output);
18296 at::functionalization::impl::commit_update(out);
18297 at::functionalization::impl::sync(out);
18298 return out;
18299 }
18300 }
18301
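// Scalar overloads: a Scalar argument can never be a functional tensor, so it is
// forwarded unchanged to both the out= redispatch and the functional variant; only
// the remaining tensor arguments are synced and unwrapped.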
18302 at::Tensor & special_xlog1py_out_self_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
18303 if (false) {
18304 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
18305 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
18306    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18307 auto other_meta = to_meta(other);
18308 auto out_meta = to_meta(out);
18309 at::AutoDispatchSkipFunctionalize func_guard;
18310 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
18311 at::_ops::special_xlog1py_self_scalar_out::call(self, other_meta, out_meta);
18312 }
18313
18314 at::Tensor other_;
18315 if (at::functionalization::impl::isFunctionalTensor(other)) {
18316 at::functionalization::impl::sync(other);
18317 other_ = at::functionalization::impl::from_functional_tensor(other);
18318 } else {
18319 other_ = other;
18320 }
18321
18322 at::Tensor out_;
18323 if (at::functionalization::impl::isFunctionalTensor(out)) {
18324 at::functionalization::impl::sync(out);
18325 out_ = at::functionalization::impl::from_functional_tensor(out);
18326 } else {
18327 out_ = out;
18328 }
18329 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
18330 if ((false || at::functionalization::impl::isFunctionalTensor(other))) {
18331      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18332 TORCH_INTERNAL_ASSERT(false,
18333 "mutating a non-functional tensor with a functional tensor is not allowed.",
18334 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18335 } else {
18336 // case 2: arguments are not functional tensors, so we no-op and redispatch.
18337 at::AutoDispatchSkipFunctionalize guard;
18338 at::Tensor tmp_output = at::_ops::special_xlog1py_self_scalar_out::call(self, other_, out_);
18339      return out;
18340 }
18341 } else {
18342 at::Tensor tmp_output;
18343 {
18344 at::AutoDispatchSkipFunctionalize guard;
18345 tmp_output = at::_ops::special_xlog1py_self_scalar::call(self, other_);
18346 }
18347 at::functionalization::impl::replace_(out, tmp_output);
18348 at::functionalization::impl::commit_update(out);
18349 at::functionalization::impl::sync(out);
18350 return out;
18351 }
18352 }
18353
18354 at::Tensor & special_xlog1py_out_other_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
18355 if (false) {
18356 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
18357 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
18358    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18359 auto self_meta = to_meta(self);
18360 auto out_meta = to_meta(out);
18361 at::AutoDispatchSkipFunctionalize func_guard;
18362 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
18363 at::_ops::special_xlog1py_other_scalar_out::call(self_meta, other, out_meta);
18364 }
18365
18366 at::Tensor self_;
18367 if (at::functionalization::impl::isFunctionalTensor(self)) {
18368 at::functionalization::impl::sync(self);
18369 self_ = at::functionalization::impl::from_functional_tensor(self);
18370 } else {
18371 self_ = self;
18372 }
18373
18374 at::Tensor out_;
18375 if (at::functionalization::impl::isFunctionalTensor(out)) {
18376 at::functionalization::impl::sync(out);
18377 out_ = at::functionalization::impl::from_functional_tensor(out);
18378 } else {
18379 out_ = out;
18380 }
18381 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
18382 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
18383      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18384 TORCH_INTERNAL_ASSERT(false,
18385 "mutating a non-functional tensor with a functional tensor is not allowed.",
18386 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18387 } else {
18388 // case 2: arguments are not functional tensors, so we no-op and redispatch.
18389 at::AutoDispatchSkipFunctionalize guard;
18390 at::Tensor tmp_output = at::_ops::special_xlog1py_other_scalar_out::call(self_, other, out_);
18391      return out;
18392 }
18393 } else {
18394 at::Tensor tmp_output;
18395 {
18396 at::AutoDispatchSkipFunctionalize guard;
18397 tmp_output = at::_ops::special_xlog1py_other_scalar::call(self_, other);
18398 }
18399 at::functionalization::impl::replace_(out, tmp_output);
18400 at::functionalization::impl::commit_update(out);
18401 at::functionalization::impl::sync(out);
18402 return out;
18403 }
18404 }
18405
18406 at::Tensor & special_i1_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
18407 if (false) {
18408 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
18409 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
18410    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18411 auto self_meta = to_meta(self);
18412 auto out_meta = to_meta(out);
18413 at::AutoDispatchSkipFunctionalize func_guard;
18414 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
18415 at::_ops::special_i1_out::call(self_meta, out_meta);
18416 }
18417
18418 at::Tensor self_;
18419 if (at::functionalization::impl::isFunctionalTensor(self)) {
18420 at::functionalization::impl::sync(self);
18421 self_ = at::functionalization::impl::from_functional_tensor(self);
18422 } else {
18423 self_ = self;
18424 }
18425
18426 at::Tensor out_;
18427 if (at::functionalization::impl::isFunctionalTensor(out)) {
18428 at::functionalization::impl::sync(out);
18429 out_ = at::functionalization::impl::from_functional_tensor(out);
18430 } else {
18431 out_ = out;
18432 }
18433 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
18434 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
18435      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18436 TORCH_INTERNAL_ASSERT(false,
18437 "mutating a non-functional tensor with a functional tensor is not allowed.",
18438 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18439 } else {
18440 // case 2: arguments are not functional tensors, so we no-op and redispatch.
18441 at::AutoDispatchSkipFunctionalize guard;
18442 at::Tensor tmp_output = at::_ops::special_i1_out::call(self_, out_);
18443      return out;
18444 }
18445 } else {
18446 at::Tensor tmp_output;
18447 {
18448 at::AutoDispatchSkipFunctionalize guard;
18449 tmp_output = at::_ops::special_i1::call(self_);
18450 }
18451 at::functionalization::impl::replace_(out, tmp_output);
18452 at::functionalization::impl::commit_update(out);
18453 at::functionalization::impl::sync(out);
18454 return out;
18455 }
18456 }
18457
18458 at::Tensor & special_i1e_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
18459 if (false) {
18460 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
18461 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
18462    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18463 auto self_meta = to_meta(self);
18464 auto out_meta = to_meta(out);
18465 at::AutoDispatchSkipFunctionalize func_guard;
18466 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
18467 at::_ops::special_i1e_out::call(self_meta, out_meta);
18468 }
18469
18470 at::Tensor self_;
18471 if (at::functionalization::impl::isFunctionalTensor(self)) {
18472 at::functionalization::impl::sync(self);
18473 self_ = at::functionalization::impl::from_functional_tensor(self);
18474 } else {
18475 self_ = self;
18476 }
18477
18478 at::Tensor out_;
18479 if (at::functionalization::impl::isFunctionalTensor(out)) {
18480 at::functionalization::impl::sync(out);
18481 out_ = at::functionalization::impl::from_functional_tensor(out);
18482 } else {
18483 out_ = out;
18484 }
18485 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
18486 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
18487      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18488 TORCH_INTERNAL_ASSERT(false,
18489 "mutating a non-functional tensor with a functional tensor is not allowed.",
18490 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18491 } else {
18492 // case 2: arguments are not functional tensors, so we no-op and redispatch.
18493 at::AutoDispatchSkipFunctionalize guard;
18494 at::Tensor tmp_output = at::_ops::special_i1e_out::call(self_, out_);
18495      return out;
18496 }
18497 } else {
18498 at::Tensor tmp_output;
18499 {
18500 at::AutoDispatchSkipFunctionalize guard;
18501 tmp_output = at::_ops::special_i1e::call(self_);
18502 }
18503 at::functionalization::impl::replace_(out, tmp_output);
18504 at::functionalization::impl::commit_update(out);
18505 at::functionalization::impl::sync(out);
18506 return out;
18507 }
18508 }
18509
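// The fft kernels below carry additional non-tensor parameters (n or s, dim, norm).
// Functionalization only rewrites tensor arguments, so these are forwarded
// unchanged on both the redispatch and the functional path.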
18510 at::Tensor & fft_fft_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
18511 if (false) {
18512 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
18513 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
18514    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18515 auto self_meta = to_meta(self);
18516 auto out_meta = to_meta(out);
18517 at::AutoDispatchSkipFunctionalize func_guard;
18518 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
18519 at::_ops::fft_fft_out::call(self_meta, n, dim, norm, out_meta);
18520 }
18521
18522 at::Tensor self_;
18523 if (at::functionalization::impl::isFunctionalTensor(self)) {
18524 at::functionalization::impl::sync(self);
18525 self_ = at::functionalization::impl::from_functional_tensor(self);
18526 } else {
18527 self_ = self;
18528 }
18529
18530 at::Tensor out_;
18531 if (at::functionalization::impl::isFunctionalTensor(out)) {
18532 at::functionalization::impl::sync(out);
18533 out_ = at::functionalization::impl::from_functional_tensor(out);
18534 } else {
18535 out_ = out;
18536 }
18537 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
18538 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
18539      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18540 TORCH_INTERNAL_ASSERT(false,
18541 "mutating a non-functional tensor with a functional tensor is not allowed.",
18542 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18543 } else {
18544 // case 2: arguments are not functional tensors, so we no-op and redispatch.
18545 at::AutoDispatchSkipFunctionalize guard;
18546 at::Tensor tmp_output = at::_ops::fft_fft_out::call(self_, n, dim, norm, out_);
18547      return out;
18548 }
18549 } else {
18550 at::Tensor tmp_output;
18551 {
18552 at::AutoDispatchSkipFunctionalize guard;
18553 tmp_output = at::_ops::fft_fft::call(self_, n, dim, norm);
18554 }
18555 at::functionalization::impl::replace_(out, tmp_output);
18556 at::functionalization::impl::commit_update(out);
18557 at::functionalization::impl::sync(out);
18558 return out;
18559 }
18560 }
18561
18562 at::Tensor & fft_rfft_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
18563 if (false) {
18564 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
18565 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
18566    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18567 auto self_meta = to_meta(self);
18568 auto out_meta = to_meta(out);
18569 at::AutoDispatchSkipFunctionalize func_guard;
18570 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
18571 at::_ops::fft_rfft_out::call(self_meta, n, dim, norm, out_meta);
18572 }
18573
18574 at::Tensor self_;
18575 if (at::functionalization::impl::isFunctionalTensor(self)) {
18576 at::functionalization::impl::sync(self);
18577 self_ = at::functionalization::impl::from_functional_tensor(self);
18578 } else {
18579 self_ = self;
18580 }
18581
18582 at::Tensor out_;
18583 if (at::functionalization::impl::isFunctionalTensor(out)) {
18584 at::functionalization::impl::sync(out);
18585 out_ = at::functionalization::impl::from_functional_tensor(out);
18586 } else {
18587 out_ = out;
18588 }
18589 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
18590 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
18591      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18592 TORCH_INTERNAL_ASSERT(false,
18593 "mutating a non-functional tensor with a functional tensor is not allowed.",
18594 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18595 } else {
18596 // case 2: arguments are not functional tensors, so we no-op and redispatch.
18597 at::AutoDispatchSkipFunctionalize guard;
18598 at::Tensor tmp_output = at::_ops::fft_rfft_out::call(self_, n, dim, norm, out_);
18599      return out;
18600 }
18601 } else {
18602 at::Tensor tmp_output;
18603 {
18604 at::AutoDispatchSkipFunctionalize guard;
18605 tmp_output = at::_ops::fft_rfft::call(self_, n, dim, norm);
18606 }
18607 at::functionalization::impl::replace_(out, tmp_output);
18608 at::functionalization::impl::commit_update(out);
18609 at::functionalization::impl::sync(out);
18610 return out;
18611 }
18612 }
18613
18614 at::Tensor & fft_hfft_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
18615 if (false) {
18616 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
18617 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
18618    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18619 auto self_meta = to_meta(self);
18620 auto out_meta = to_meta(out);
18621 at::AutoDispatchSkipFunctionalize func_guard;
18622 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
18623 at::_ops::fft_hfft_out::call(self_meta, n, dim, norm, out_meta);
18624 }
18625
18626 at::Tensor self_;
18627 if (at::functionalization::impl::isFunctionalTensor(self)) {
18628 at::functionalization::impl::sync(self);
18629 self_ = at::functionalization::impl::from_functional_tensor(self);
18630 } else {
18631 self_ = self;
18632 }
18633
18634 at::Tensor out_;
18635 if (at::functionalization::impl::isFunctionalTensor(out)) {
18636 at::functionalization::impl::sync(out);
18637 out_ = at::functionalization::impl::from_functional_tensor(out);
18638 } else {
18639 out_ = out;
18640 }
18641 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
18642 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
18643      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18644 TORCH_INTERNAL_ASSERT(false,
18645 "mutating a non-functional tensor with a functional tensor is not allowed.",
18646 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18647 } else {
18648 // case 2: arguments are not functional tensors, so we no-op and redispatch.
18649 at::AutoDispatchSkipFunctionalize guard;
18650 at::Tensor tmp_output = at::_ops::fft_hfft_out::call(self_, n, dim, norm, out_);
18651      return out;
18652 }
18653 } else {
18654 at::Tensor tmp_output;
18655 {
18656 at::AutoDispatchSkipFunctionalize guard;
18657 tmp_output = at::_ops::fft_hfft::call(self_, n, dim, norm);
18658 }
18659 at::functionalization::impl::replace_(out, tmp_output);
18660 at::functionalization::impl::commit_update(out);
18661 at::functionalization::impl::sync(out);
18662 return out;
18663 }
18664 }
18665
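// fft_hfft2 (and fft_hfftn further down) take and return `out` as
// `const at::Tensor &`; aside from the signature, the functionalization logic is
// identical to the out= kernels above.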
18666 const at::Tensor & fft_hfft2_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
18667 if (false) {
18668 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
18669 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
18670    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18671 auto self_meta = to_meta(self);
18672 auto out_meta = to_meta(out);
18673 at::AutoDispatchSkipFunctionalize func_guard;
18674 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
18675 at::_ops::fft_hfft2_out::call(self_meta, s, dim, norm, out_meta);
18676 }
18677
18678 at::Tensor self_;
18679 if (at::functionalization::impl::isFunctionalTensor(self)) {
18680 at::functionalization::impl::sync(self);
18681 self_ = at::functionalization::impl::from_functional_tensor(self);
18682 } else {
18683 self_ = self;
18684 }
18685
18686 at::Tensor out_;
18687 if (at::functionalization::impl::isFunctionalTensor(out)) {
18688 at::functionalization::impl::sync(out);
18689 out_ = at::functionalization::impl::from_functional_tensor(out);
18690 } else {
18691 out_ = out;
18692 }
18693 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
18694 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
18695      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18696 TORCH_INTERNAL_ASSERT(false,
18697 "mutating a non-functional tensor with a functional tensor is not allowed.",
18698 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18699 } else {
18700 // case 2: arguments are not functional tensors, so we no-op and redispatch.
18701 at::AutoDispatchSkipFunctionalize guard;
18702 at::Tensor tmp_output = at::_ops::fft_hfft2_out::call(self_, s, dim, norm, out_);
18703      return out;
18704 }
18705 } else {
18706 at::Tensor tmp_output;
18707 {
18708 at::AutoDispatchSkipFunctionalize guard;
18709 tmp_output = at::_ops::fft_hfft2::call(self_, s, dim, norm);
18710 }
18711 at::functionalization::impl::replace_(out, tmp_output);
18712 at::functionalization::impl::commit_update(out);
18713 at::functionalization::impl::sync(out);
18714 return out;
18715 }
18716 }
18717
18718 at::Tensor & fft_ifftn_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
18719 if (false) {
18720 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
18721 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
18722    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18723 auto self_meta = to_meta(self);
18724 auto out_meta = to_meta(out);
18725 at::AutoDispatchSkipFunctionalize func_guard;
18726 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
18727 at::_ops::fft_ifftn_out::call(self_meta, s, dim, norm, out_meta);
18728 }
18729
18730 at::Tensor self_;
18731 if (at::functionalization::impl::isFunctionalTensor(self)) {
18732 at::functionalization::impl::sync(self);
18733 self_ = at::functionalization::impl::from_functional_tensor(self);
18734 } else {
18735 self_ = self;
18736 }
18737
18738 at::Tensor out_;
18739 if (at::functionalization::impl::isFunctionalTensor(out)) {
18740 at::functionalization::impl::sync(out);
18741 out_ = at::functionalization::impl::from_functional_tensor(out);
18742 } else {
18743 out_ = out;
18744 }
18745 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
18746 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
18747      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18748 TORCH_INTERNAL_ASSERT(false,
18749 "mutating a non-functional tensor with a functional tensor is not allowed.",
18750 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18751 } else {
18752 // case 2: arguments are not functional tensors, so we no-op and redispatch.
18753 at::AutoDispatchSkipFunctionalize guard;
18754 at::Tensor tmp_output = at::_ops::fft_ifftn_out::call(self_, s, dim, norm, out_);
18755      return out;
18756 }
18757 } else {
18758 at::Tensor tmp_output;
18759 {
18760 at::AutoDispatchSkipFunctionalize guard;
18761 tmp_output = at::_ops::fft_ifftn::call(self_, s, dim, norm);
18762 }
18763 at::functionalization::impl::replace_(out, tmp_output);
18764 at::functionalization::impl::commit_update(out);
18765 at::functionalization::impl::sync(out);
18766 return out;
18767 }
18768 }
18769
18770 at::Tensor & fft_rfftn_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
18771 if (false) {
18772 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
18773 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
18774    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18775 auto self_meta = to_meta(self);
18776 auto out_meta = to_meta(out);
18777 at::AutoDispatchSkipFunctionalize func_guard;
18778 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
18779 at::_ops::fft_rfftn_out::call(self_meta, s, dim, norm, out_meta);
18780 }
18781
18782 at::Tensor self_;
18783 if (at::functionalization::impl::isFunctionalTensor(self)) {
18784 at::functionalization::impl::sync(self);
18785 self_ = at::functionalization::impl::from_functional_tensor(self);
18786 } else {
18787 self_ = self;
18788 }
18789
18790 at::Tensor out_;
18791 if (at::functionalization::impl::isFunctionalTensor(out)) {
18792 at::functionalization::impl::sync(out);
18793 out_ = at::functionalization::impl::from_functional_tensor(out);
18794 } else {
18795 out_ = out;
18796 }
18797 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
18798 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
18799      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18800 TORCH_INTERNAL_ASSERT(false,
18801 "mutating a non-functional tensor with a functional tensor is not allowed.",
18802 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18803 } else {
18804 // case 2: arguments are not functional tensors, so we no-op and redispatch.
18805 at::AutoDispatchSkipFunctionalize guard;
18806 at::Tensor tmp_output = at::_ops::fft_rfftn_out::call(self_, s, dim, norm, out_);
18807      return out;
18808 }
18809 } else {
18810 at::Tensor tmp_output;
18811 {
18812 at::AutoDispatchSkipFunctionalize guard;
18813 tmp_output = at::_ops::fft_rfftn::call(self_, s, dim, norm);
18814 }
18815 at::functionalization::impl::replace_(out, tmp_output);
18816 at::functionalization::impl::commit_update(out);
18817 at::functionalization::impl::sync(out);
18818 return out;
18819 }
18820 }
18821
18822 const at::Tensor & fft_hfftn_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
18823 if (false) {
18824 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
18825 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
18826    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18827 auto self_meta = to_meta(self);
18828 auto out_meta = to_meta(out);
18829 at::AutoDispatchSkipFunctionalize func_guard;
18830 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
18831 at::_ops::fft_hfftn_out::call(self_meta, s, dim, norm, out_meta);
18832 }
18833
18834 at::Tensor self_;
18835 if (at::functionalization::impl::isFunctionalTensor(self)) {
18836 at::functionalization::impl::sync(self);
18837 self_ = at::functionalization::impl::from_functional_tensor(self);
18838 } else {
18839 self_ = self;
18840 }
18841
18842 at::Tensor out_;
18843 if (at::functionalization::impl::isFunctionalTensor(out)) {
18844 at::functionalization::impl::sync(out);
18845 out_ = at::functionalization::impl::from_functional_tensor(out);
18846 } else {
18847 out_ = out;
18848 }
18849 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
18850 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
18851      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18852 TORCH_INTERNAL_ASSERT(false,
18853 "mutating a non-functional tensor with a functional tensor is not allowed.",
18854 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18855 } else {
18856 // case 2: arguments are not functional tensors, so we no-op and redispatch.
18857 at::AutoDispatchSkipFunctionalize guard;
18858 at::Tensor tmp_output = at::_ops::fft_hfftn_out::call(self_, s, dim, norm, out_);
18859      return out;
18860 }
18861 } else {
18862 at::Tensor tmp_output;
18863 {
18864 at::AutoDispatchSkipFunctionalize guard;
18865 tmp_output = at::_ops::fft_hfftn::call(self_, s, dim, norm);
18866 }
18867 at::functionalization::impl::replace_(out, tmp_output);
18868 at::functionalization::impl::commit_update(out);
18869 at::functionalization::impl::sync(out);
18870 return out;
18871 }
18872 }
18873
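// Factory-style ops such as fft_fftfreq have no tensor inputs, so the case-1 check
// degenerates to `if ((false))`. On the functional path, the TensorOptions for the
// functional call (dtype, layout, device) are taken from the unwrapped `out_`.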
18874 at::Tensor & fft_fftfreq_out_out(c10::DispatchKeySet dispatchKeySet, int64_t n, double d, at::Tensor & out) {
18875 if (false) {
18876 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
18877 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
18878    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18879 auto out_meta = to_meta(out);
18880 at::AutoDispatchSkipFunctionalize func_guard;
18881 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
18882 at::_ops::fft_fftfreq_out::call(n, d, out_meta);
18883 }
18884
18885 at::Tensor out_;
18886 if (at::functionalization::impl::isFunctionalTensor(out)) {
18887 at::functionalization::impl::sync(out);
18888 out_ = at::functionalization::impl::from_functional_tensor(out);
18889 } else {
18890 out_ = out;
18891 }
18892 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
18893 if ((false)) {
18894      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18895 TORCH_INTERNAL_ASSERT(false,
18896 "mutating a non-functional tensor with a functional tensor is not allowed.",
18897 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18898 } else {
18899 // case 2: arguments are not functional tensors, so we no-op and redispatch.
18900 at::AutoDispatchSkipFunctionalize guard;
18901 at::Tensor tmp_output = at::_ops::fft_fftfreq_out::call(n, d, out_);
18902      return out;
18903 }
18904 } else {
18905 at::Tensor tmp_output;
18906 {
18907 at::AutoDispatchSkipFunctionalize guard;
18908 tmp_output = at::_ops::fft_fftfreq::call(n, d, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt);
18909 }
18910 at::functionalization::impl::replace_(out, tmp_output);
18911 at::functionalization::impl::commit_update(out);
18912 at::functionalization::impl::sync(out);
18913 return out;
18914 }
18915 }
18916
18917 at::Tensor & linalg_det_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & out) {
18918 if (false) {
18919 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
18920 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
18921    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18922 auto A_meta = to_meta(A);
18923 auto out_meta = to_meta(out);
18924 at::AutoDispatchSkipFunctionalize func_guard;
18925 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
18926 at::_ops::linalg_det_out::call(A_meta, out_meta);
18927 }
18928
18929 at::Tensor A_;
18930 if (at::functionalization::impl::isFunctionalTensor(A)) {
18931 at::functionalization::impl::sync(A);
18932 A_ = at::functionalization::impl::from_functional_tensor(A);
18933 } else {
18934 A_ = A;
18935 }
18936
18937 at::Tensor out_;
18938 if (at::functionalization::impl::isFunctionalTensor(out)) {
18939 at::functionalization::impl::sync(out);
18940 out_ = at::functionalization::impl::from_functional_tensor(out);
18941 } else {
18942 out_ = out;
18943 }
18944 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
18945 if ((false || at::functionalization::impl::isFunctionalTensor(A))) {
18946      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18947 TORCH_INTERNAL_ASSERT(false,
18948 "mutating a non-functional tensor with a functional tensor is not allowed.",
18949 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
18950 } else {
18951 // case 2: arguments are not functional tensors, so we no-op and redispatch.
18952 at::AutoDispatchSkipFunctionalize guard;
18953 at::Tensor tmp_output = at::_ops::linalg_det_out::call(A_, out_);
18954      return out;
18955 }
18956 } else {
18957 at::Tensor tmp_output;
18958 {
18959 at::AutoDispatchSkipFunctionalize guard;
18960 tmp_output = at::_ops::linalg_det::call(A_);
18961 }
18962 at::functionalization::impl::replace_(out, tmp_output);
18963 at::functionalization::impl::commit_update(out);
18964 at::functionalization::impl::sync(out);
18965 return out;
18966 }
18967 }
18968
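// Multi-output kernels: all out tensors (here LD, pivots, info) must be functional
// tensors to take the functional path; the tuple returned by the functional op is
// then unpacked with std::get<i> and committed back into each out tensor in turn.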
18969 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_ldl_factor_ex_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool hermitian, bool check_errors, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info) {
18970 if (false) {
18971 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
18972 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
18973    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18974 auto self_meta = to_meta(self);
18975 auto LD_meta = to_meta(LD);
18976 auto pivots_meta = to_meta(pivots);
18977 auto info_meta = to_meta(info);
18978 at::AutoDispatchSkipFunctionalize func_guard;
18979 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
18980 at::_ops::linalg_ldl_factor_ex_out::call(self_meta, hermitian, check_errors, LD_meta, pivots_meta, info_meta);
18981 }
18982
18983 at::Tensor self_;
18984 if (at::functionalization::impl::isFunctionalTensor(self)) {
18985 at::functionalization::impl::sync(self);
18986 self_ = at::functionalization::impl::from_functional_tensor(self);
18987 } else {
18988 self_ = self;
18989 }
18990
18991 at::Tensor LD_;
18992 if (at::functionalization::impl::isFunctionalTensor(LD)) {
18993 at::functionalization::impl::sync(LD);
18994 LD_ = at::functionalization::impl::from_functional_tensor(LD);
18995 } else {
18996 LD_ = LD;
18997 }
18998
18999 at::Tensor pivots_;
19000 if (at::functionalization::impl::isFunctionalTensor(pivots)) {
19001 at::functionalization::impl::sync(pivots);
19002 pivots_ = at::functionalization::impl::from_functional_tensor(pivots);
19003 } else {
19004 pivots_ = pivots;
19005 }
19006
19007 at::Tensor info_;
19008 if (at::functionalization::impl::isFunctionalTensor(info)) {
19009 at::functionalization::impl::sync(info);
19010 info_ = at::functionalization::impl::from_functional_tensor(info);
19011 } else {
19012 info_ = info;
19013 }
19014 if (!(true && at::functionalization::impl::isFunctionalTensor(LD) && at::functionalization::impl::isFunctionalTensor(pivots) && at::functionalization::impl::isFunctionalTensor(info))) {
19015 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
19016      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19017 TORCH_INTERNAL_ASSERT(false,
19018 "mutating a non-functional tensor with a functional tensor is not allowed.",
19019 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
19020 } else {
19021 // case 2: arguments are not functional tensors, so we no-op and redispatch.
19022 at::AutoDispatchSkipFunctionalize guard;
19023 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::linalg_ldl_factor_ex_out::call(self_, hermitian, check_errors, LD_, pivots_, info_);
19024      return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(LD, pivots, info);
19025 }
19026 } else {
19027 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
19028 {
19029 at::AutoDispatchSkipFunctionalize guard;
19030 tmp_output = at::_ops::linalg_ldl_factor_ex::call(self_, hermitian, check_errors);
19031 }
19032 at::functionalization::impl::replace_(LD, std::get<0>(tmp_output));
19033 at::functionalization::impl::commit_update(LD);
19034 at::functionalization::impl::sync(LD);
19035 at::functionalization::impl::replace_(pivots, std::get<1>(tmp_output));
19036 at::functionalization::impl::commit_update(pivots);
19037 at::functionalization::impl::sync(pivots);
19038 at::functionalization::impl::replace_(info, std::get<2>(tmp_output));
19039 at::functionalization::impl::commit_update(info);
19040 at::functionalization::impl::sync(info);
19041 return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(LD, pivots, info);
19042 }
19043 }
19044
19045 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> linalg_lstsq_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond, c10::optional<c10::string_view> driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values) {
19046 if (false) {
19047 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
19048 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
19049    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
19050 auto self_meta = to_meta(self);
19051 auto b_meta = to_meta(b);
19052 auto solution_meta = to_meta(solution);
19053 auto residuals_meta = to_meta(residuals);
19054 auto rank_meta = to_meta(rank);
19055 auto singular_values_meta = to_meta(singular_values);
19056 at::AutoDispatchSkipFunctionalize func_guard;
19057 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
19058 at::_ops::linalg_lstsq_out::call(self_meta, b_meta, rcond, driver, solution_meta, residuals_meta, rank_meta, singular_values_meta);
19059 }
19060
19061 at::Tensor self_;
19062 if (at::functionalization::impl::isFunctionalTensor(self)) {
19063 at::functionalization::impl::sync(self);
19064 self_ = at::functionalization::impl::from_functional_tensor(self);
19065 } else {
19066 self_ = self;
19067 }
19068
19069 at::Tensor b_;
19070 if (at::functionalization::impl::isFunctionalTensor(b)) {
19071 at::functionalization::impl::sync(b);
19072 b_ = at::functionalization::impl::from_functional_tensor(b);
19073 } else {
19074 b_ = b;
19075 }
19076
19077 at::Tensor solution_;
19078 if (at::functionalization::impl::isFunctionalTensor(solution)) {
19079 at::functionalization::impl::sync(solution);
19080 solution_ = at::functionalization::impl::from_functional_tensor(solution);
19081 } else {
19082 solution_ = solution;
19083 }
19084
19085 at::Tensor residuals_;
19086 if (at::functionalization::impl::isFunctionalTensor(residuals)) {
19087 at::functionalization::impl::sync(residuals);
19088 residuals_ = at::functionalization::impl::from_functional_tensor(residuals);
19089 } else {
19090 residuals_ = residuals;
19091 }
19092
19093 at::Tensor rank_;
19094 if (at::functionalization::impl::isFunctionalTensor(rank)) {
19095 at::functionalization::impl::sync(rank);
19096 rank_ = at::functionalization::impl::from_functional_tensor(rank);
19097 } else {
19098 rank_ = rank;
19099 }
19100
19101 at::Tensor singular_values_;
19102 if (at::functionalization::impl::isFunctionalTensor(singular_values)) {
19103 at::functionalization::impl::sync(singular_values);
19104 singular_values_ = at::functionalization::impl::from_functional_tensor(singular_values);
19105 } else {
19106 singular_values_ = singular_values;
19107 }
19108 if (!(true && at::functionalization::impl::isFunctionalTensor(solution) && at::functionalization::impl::isFunctionalTensor(residuals) && at::functionalization::impl::isFunctionalTensor(rank) && at::functionalization::impl::isFunctionalTensor(singular_values))) {
19109 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(b))) {
19110      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19111 TORCH_INTERNAL_ASSERT(false,
19112 "mutating a non-functional tensor with a functional tensor is not allowed.",
19113 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
19114 } else {
19115 // case 2: arguments are not functional tensors, so we no-op and redispatch.
19116 at::AutoDispatchSkipFunctionalize guard;
19117 ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::linalg_lstsq_out::call(self_, b_, rcond, driver, solution_, residuals_, rank_, singular_values_);
19118      return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(solution, residuals, rank, singular_values);
19119 }
19120 } else {
19121 ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output;
19122 {
19123 at::AutoDispatchSkipFunctionalize guard;
19124 tmp_output = at::_ops::linalg_lstsq::call(self_, b_, rcond, driver);
19125 }
19126 at::functionalization::impl::replace_(solution, std::get<0>(tmp_output));
19127 at::functionalization::impl::commit_update(solution);
19128 at::functionalization::impl::sync(solution);
19129 at::functionalization::impl::replace_(residuals, std::get<1>(tmp_output));
19130 at::functionalization::impl::commit_update(residuals);
19131 at::functionalization::impl::sync(residuals);
19132 at::functionalization::impl::replace_(rank, std::get<2>(tmp_output));
19133 at::functionalization::impl::commit_update(rank);
19134 at::functionalization::impl::sync(rank);
19135 at::functionalization::impl::replace_(singular_values, std::get<3>(tmp_output));
19136 at::functionalization::impl::commit_update(singular_values);
19137 at::functionalization::impl::sync(singular_values);
19138 return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(solution, residuals, rank, singular_values);
19139 }
19140 }
19141
19142 at::Tensor & linalg_matrix_exp_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
19143 if (false) {
19144 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
19145 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
19146    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
19147 auto self_meta = to_meta(self);
19148 auto out_meta = to_meta(out);
19149 at::AutoDispatchSkipFunctionalize func_guard;
19150 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
19151 at::_ops::linalg_matrix_exp_out::call(self_meta, out_meta);
19152 }
19153
19154 at::Tensor self_;
19155 if (at::functionalization::impl::isFunctionalTensor(self)) {
19156 at::functionalization::impl::sync(self);
19157 self_ = at::functionalization::impl::from_functional_tensor(self);
19158 } else {
19159 self_ = self;
19160 }
19161
19162 at::Tensor out_;
19163 if (at::functionalization::impl::isFunctionalTensor(out)) {
19164 at::functionalization::impl::sync(out);
19165 out_ = at::functionalization::impl::from_functional_tensor(out);
19166 } else {
19167 out_ = out;
19168 }
19169 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
19170 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
19171      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19172 TORCH_INTERNAL_ASSERT(false,
19173 "mutating a non-functional tensor with a functional tensor is not allowed.",
19174 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
19175 } else {
19176 // case 2: arguments are not functional tensors, so we no-op and redispatch.
19177 at::AutoDispatchSkipFunctionalize guard;
19178 at::Tensor tmp_output = at::_ops::linalg_matrix_exp_out::call(self_, out_);
19179      return out;
19180 }
19181 } else {
19182 at::Tensor tmp_output;
19183 {
19184 at::AutoDispatchSkipFunctionalize guard;
19185 tmp_output = at::_ops::linalg_matrix_exp::call(self_);
19186 }
19187 at::functionalization::impl::replace_(out, tmp_output);
19188 at::functionalization::impl::commit_update(out);
19189 at::functionalization::impl::sync(out);
19190 return out;
19191 }
19192 }
19193
19194 ::std::tuple<at::Tensor &,at::Tensor &> linalg_slogdet_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet) {
19195 if (false) {
19196 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
19197 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
19198    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
19199 auto A_meta = to_meta(A);
19200 auto sign_meta = to_meta(sign);
19201 auto logabsdet_meta = to_meta(logabsdet);
19202 at::AutoDispatchSkipFunctionalize func_guard;
19203 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
19204 at::_ops::linalg_slogdet_out::call(A_meta, sign_meta, logabsdet_meta);
19205 }
19206
19207 at::Tensor A_;
19208 if (at::functionalization::impl::isFunctionalTensor(A)) {
19209 at::functionalization::impl::sync(A);
19210 A_ = at::functionalization::impl::from_functional_tensor(A);
19211 } else {
19212 A_ = A;
19213 }
19214
19215 at::Tensor sign_;
19216 if (at::functionalization::impl::isFunctionalTensor(sign)) {
19217 at::functionalization::impl::sync(sign);
19218 sign_ = at::functionalization::impl::from_functional_tensor(sign);
19219 } else {
19220 sign_ = sign;
19221 }
19222
19223 at::Tensor logabsdet_;
19224 if (at::functionalization::impl::isFunctionalTensor(logabsdet)) {
19225 at::functionalization::impl::sync(logabsdet);
19226 logabsdet_ = at::functionalization::impl::from_functional_tensor(logabsdet);
19227 } else {
19228 logabsdet_ = logabsdet;
19229 }
19230 if (!(true && at::functionalization::impl::isFunctionalTensor(sign) && at::functionalization::impl::isFunctionalTensor(logabsdet))) {
19231 if ((false || at::functionalization::impl::isFunctionalTensor(A))) {
19232      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19233 TORCH_INTERNAL_ASSERT(false,
19234 "mutating a non-functional tensor with a functional tensor is not allowed.",
19235 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
19236 } else {
19237 // case 2: arguments are not functional tensors, so we no-op and redispatch.
19238 at::AutoDispatchSkipFunctionalize guard;
19239 ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::linalg_slogdet_out::call(A_, sign_, logabsdet_);
19240      return ::std::tuple<at::Tensor &,at::Tensor &>(sign, logabsdet);
19241 }
19242 } else {
19243 ::std::tuple<at::Tensor,at::Tensor> tmp_output;
19244 {
19245 at::AutoDispatchSkipFunctionalize guard;
19246 tmp_output = at::_ops::linalg_slogdet::call(A_);
19247 }
19248 at::functionalization::impl::replace_(sign, std::get<0>(tmp_output));
19249 at::functionalization::impl::commit_update(sign);
19250 at::functionalization::impl::sync(sign);
19251 at::functionalization::impl::replace_(logabsdet, std::get<1>(tmp_output));
19252 at::functionalization::impl::commit_update(logabsdet);
19253 at::functionalization::impl::sync(logabsdet);
19254 return ::std::tuple<at::Tensor &,at::Tensor &>(sign, logabsdet);
19255 }
19256 }
19257
19258 ::std::tuple<at::Tensor &,at::Tensor &> _linalg_eigh_out_eigenvalues(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, c10::string_view UPLO, bool compute_v, at::Tensor & eigenvalues, at::Tensor & eigenvectors) {
19259 if (false) {
19260 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
19261 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
19262    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
19263 auto A_meta = to_meta(A);
19264 auto eigenvalues_meta = to_meta(eigenvalues);
19265 auto eigenvectors_meta = to_meta(eigenvectors);
19266 at::AutoDispatchSkipFunctionalize func_guard;
19267 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
19268 at::_ops::_linalg_eigh_eigenvalues::call(A_meta, UPLO, compute_v, eigenvalues_meta, eigenvectors_meta);
19269 }
19270
19271 at::Tensor A_;
19272 if (at::functionalization::impl::isFunctionalTensor(A)) {
19273 at::functionalization::impl::sync(A);
19274 A_ = at::functionalization::impl::from_functional_tensor(A);
19275 } else {
19276 A_ = A;
19277 }
19278
19279 at::Tensor eigenvalues_;
19280 if (at::functionalization::impl::isFunctionalTensor(eigenvalues)) {
19281 at::functionalization::impl::sync(eigenvalues);
19282 eigenvalues_ = at::functionalization::impl::from_functional_tensor(eigenvalues);
19283 } else {
19284 eigenvalues_ = eigenvalues;
19285 }
19286
19287 at::Tensor eigenvectors_;
19288 if (at::functionalization::impl::isFunctionalTensor(eigenvectors)) {
19289 at::functionalization::impl::sync(eigenvectors);
19290 eigenvectors_ = at::functionalization::impl::from_functional_tensor(eigenvectors);
19291 } else {
19292 eigenvectors_ = eigenvectors;
19293 }
19294 if (!(true && at::functionalization::impl::isFunctionalTensor(eigenvalues) && at::functionalization::impl::isFunctionalTensor(eigenvectors))) {
19295 if ((false || at::functionalization::impl::isFunctionalTensor(A))) {
19296           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19297 TORCH_INTERNAL_ASSERT(false,
19298 "mutating a non-functional tensor with a functional tensor is not allowed.",
19299 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
19300 } else {
19301 // case 2: arguments are not functional tensors, so we no-op and redispatch.
19302 at::AutoDispatchSkipFunctionalize guard;
19303 ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_linalg_eigh_eigenvalues::call(A_, UPLO, compute_v, eigenvalues_, eigenvectors_);
19304           return ::std::tuple<at::Tensor &,at::Tensor &>(eigenvalues, eigenvectors);
19305 }
19306 } else {
19307 ::std::tuple<at::Tensor,at::Tensor> tmp_output;
19308 {
19309 at::AutoDispatchSkipFunctionalize guard;
19310 tmp_output = at::_ops::_linalg_eigh::call(A_, UPLO, compute_v);
19311 }
19312 at::functionalization::impl::replace_(eigenvalues, std::get<0>(tmp_output));
19313 at::functionalization::impl::commit_update(eigenvalues);
19314 at::functionalization::impl::sync(eigenvalues);
19315 at::functionalization::impl::replace_(eigenvectors, std::get<1>(tmp_output));
19316 at::functionalization::impl::commit_update(eigenvectors);
19317 at::functionalization::impl::sync(eigenvectors);
19318 return ::std::tuple<at::Tensor &,at::Tensor &>(eigenvalues, eigenvectors);
19319 }
19320 }
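
    // Note: the `if (false)` block at the top of each kernel here is compiled out on purpose. As its
    // own comments explain, the meta-tensor shape pre-check is only enabled for inplace ops today, so
    // for these out= overloads the guard is emitted with a literal `false` and the meta dispatch
    // (to_meta(), ExcludeDispatchKeyGuard, and the *_out::call on meta tensors) never runs.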
19321
19322 at::Tensor & inner_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
19323 if (false) {
19324 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
19325 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
19326         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
19327 auto self_meta = to_meta(self);
19328 auto other_meta = to_meta(other);
19329 auto out_meta = to_meta(out);
19330 at::AutoDispatchSkipFunctionalize func_guard;
19331 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
19332 at::_ops::inner_out::call(self_meta, other_meta, out_meta);
19333 }
19334
19335 at::Tensor self_;
19336 if (at::functionalization::impl::isFunctionalTensor(self)) {
19337 at::functionalization::impl::sync(self);
19338 self_ = at::functionalization::impl::from_functional_tensor(self);
19339 } else {
19340 self_ = self;
19341 }
19342
19343 at::Tensor other_;
19344 if (at::functionalization::impl::isFunctionalTensor(other)) {
19345 at::functionalization::impl::sync(other);
19346 other_ = at::functionalization::impl::from_functional_tensor(other);
19347 } else {
19348 other_ = other;
19349 }
19350
19351 at::Tensor out_;
19352 if (at::functionalization::impl::isFunctionalTensor(out)) {
19353 at::functionalization::impl::sync(out);
19354 out_ = at::functionalization::impl::from_functional_tensor(out);
19355 } else {
19356 out_ = out;
19357 }
19358 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
19359 if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
19360           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19361 TORCH_INTERNAL_ASSERT(false,
19362 "mutating a non-functional tensor with a functional tensor is not allowed.",
19363 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
19364 } else {
19365 // case 2: arguments are not functional tensors, so we no-op and redispatch.
19366 at::AutoDispatchSkipFunctionalize guard;
19367 at::Tensor tmp_output = at::_ops::inner_out::call(self_, other_, out_);
19368           return out;
19369 }
19370 } else {
19371 at::Tensor tmp_output;
19372 {
19373 at::AutoDispatchSkipFunctionalize guard;
19374 tmp_output = at::_ops::inner::call(self_, other_);
19375 }
19376 at::functionalization::impl::replace_(out, tmp_output);
19377 at::functionalization::impl::commit_update(out);
19378 at::functionalization::impl::sync(out);
19379 return out;
19380 }
19381 }
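
    // For single-output out= ops such as inner.out, the pattern above collapses to a single
    // replace_()/commit_update()/sync() triple on `out`. Conceptually (a rough sketch, not a claim
    // about other call paths), a mutating call like
    //
    //   at::_ops::inner_out::call(self, other, out);              // writes into `out`
    //
    // is served under functionalization by
    //
    //   at::Tensor fresh = at::_ops::inner::call(self, other);    // pure functional compute
    //   at::functionalization::impl::replace_(out, fresh);        // update tracked by the wrapper
    //   at::functionalization::impl::commit_update(out);
    //   at::functionalization::impl::sync(out);
    //
    // so later reads of `out` observe the new value without any real in-place mutation underneath
    // the FunctionalTensorWrapper.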
19382
19383 at::Tensor & linalg_matrix_norm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
19384 if (false) {
19385 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
19386 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
19387         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
19388 auto self_meta = to_meta(self);
19389 auto out_meta = to_meta(out);
19390 at::AutoDispatchSkipFunctionalize func_guard;
19391 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
19392 at::_ops::linalg_matrix_norm_out::call(self_meta, ord, dim, keepdim, dtype, out_meta);
19393 }
19394
19395 at::Tensor self_;
19396 if (at::functionalization::impl::isFunctionalTensor(self)) {
19397 at::functionalization::impl::sync(self);
19398 self_ = at::functionalization::impl::from_functional_tensor(self);
19399 } else {
19400 self_ = self;
19401 }
19402
19403 at::Tensor out_;
19404 if (at::functionalization::impl::isFunctionalTensor(out)) {
19405 at::functionalization::impl::sync(out);
19406 out_ = at::functionalization::impl::from_functional_tensor(out);
19407 } else {
19408 out_ = out;
19409 }
19410 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
19411 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
19412           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19413 TORCH_INTERNAL_ASSERT(false,
19414 "mutating a non-functional tensor with a functional tensor is not allowed.",
19415 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
19416 } else {
19417 // case 2: arguments are not functional tensors, so we no-op and redispatch.
19418 at::AutoDispatchSkipFunctionalize guard;
19419 at::Tensor tmp_output = at::_ops::linalg_matrix_norm_out::call(self_, ord, dim, keepdim, dtype, out_);
19420           return out;
19421 }
19422 } else {
19423 at::Tensor tmp_output;
19424 {
19425 at::AutoDispatchSkipFunctionalize guard;
19426 tmp_output = at::_ops::linalg_matrix_norm::call(self_, ord, dim, keepdim, dtype);
19427 }
19428 at::functionalization::impl::replace_(out, tmp_output);
19429 at::functionalization::impl::commit_update(out);
19430 at::functionalization::impl::sync(out);
19431 return out;
19432 }
19433 }
19434
19435 at::Tensor & linalg_matrix_norm_out_str_ord_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
19436 if (false) {
19437 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
19438 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
19439         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
19440 auto self_meta = to_meta(self);
19441 auto out_meta = to_meta(out);
19442 at::AutoDispatchSkipFunctionalize func_guard;
19443 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
19444 at::_ops::linalg_matrix_norm_str_ord_out::call(self_meta, ord, dim, keepdim, dtype, out_meta);
19445 }
19446
19447 at::Tensor self_;
19448 if (at::functionalization::impl::isFunctionalTensor(self)) {
19449 at::functionalization::impl::sync(self);
19450 self_ = at::functionalization::impl::from_functional_tensor(self);
19451 } else {
19452 self_ = self;
19453 }
19454
19455 at::Tensor out_;
19456 if (at::functionalization::impl::isFunctionalTensor(out)) {
19457 at::functionalization::impl::sync(out);
19458 out_ = at::functionalization::impl::from_functional_tensor(out);
19459 } else {
19460 out_ = out;
19461 }
19462 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
19463 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
19464           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19465 TORCH_INTERNAL_ASSERT(false,
19466 "mutating a non-functional tensor with a functional tensor is not allowed.",
19467 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
19468 } else {
19469 // case 2: arguments are not functional tensors, so we no-op and redispatch.
19470 at::AutoDispatchSkipFunctionalize guard;
19471 at::Tensor tmp_output = at::_ops::linalg_matrix_norm_str_ord_out::call(self_, ord, dim, keepdim, dtype, out_);
19472           return out;
19473 }
19474 } else {
19475 at::Tensor tmp_output;
19476 {
19477 at::AutoDispatchSkipFunctionalize guard;
19478 tmp_output = at::_ops::linalg_matrix_norm_str_ord::call(self_, ord, dim, keepdim, dtype);
19479 }
19480 at::functionalization::impl::replace_(out, tmp_output);
19481 at::functionalization::impl::commit_update(out);
19482 at::functionalization::impl::sync(out);
19483 return out;
19484 }
19485 }
19486
19487 at::Tensor & linalg_tensorinv_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t ind, at::Tensor & out) {
19488 if (false) {
19489 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
19490 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
19491         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
19492 auto self_meta = to_meta(self);
19493 auto out_meta = to_meta(out);
19494 at::AutoDispatchSkipFunctionalize func_guard;
19495 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
19496 at::_ops::linalg_tensorinv_out::call(self_meta, ind, out_meta);
19497 }
19498
19499 at::Tensor self_;
19500 if (at::functionalization::impl::isFunctionalTensor(self)) {
19501 at::functionalization::impl::sync(self);
19502 self_ = at::functionalization::impl::from_functional_tensor(self);
19503 } else {
19504 self_ = self;
19505 }
19506
19507 at::Tensor out_;
19508 if (at::functionalization::impl::isFunctionalTensor(out)) {
19509 at::functionalization::impl::sync(out);
19510 out_ = at::functionalization::impl::from_functional_tensor(out);
19511 } else {
19512 out_ = out;
19513 }
19514 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
19515 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
19516           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19517 TORCH_INTERNAL_ASSERT(false,
19518 "mutating a non-functional tensor with a functional tensor is not allowed.",
19519 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
19520 } else {
19521 // case 2: arguments are not functional tensors, so we no-op and redispatch.
19522 at::AutoDispatchSkipFunctionalize guard;
19523 at::Tensor tmp_output = at::_ops::linalg_tensorinv_out::call(self_, ind, out_);
19524           return out;
19525 }
19526 } else {
19527 at::Tensor tmp_output;
19528 {
19529 at::AutoDispatchSkipFunctionalize guard;
19530 tmp_output = at::_ops::linalg_tensorinv::call(self_, ind);
19531 }
19532 at::functionalization::impl::replace_(out, tmp_output);
19533 at::functionalization::impl::commit_update(out);
19534 at::functionalization::impl::sync(out);
19535 return out;
19536 }
19537 }
19538
19539 at::Tensor & linalg_matrix_power_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n, at::Tensor & out) {
19540 if (false) {
19541 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
19542 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
19543         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
19544 auto self_meta = to_meta(self);
19545 auto out_meta = to_meta(out);
19546 at::AutoDispatchSkipFunctionalize func_guard;
19547 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
19548 at::_ops::linalg_matrix_power_out::call(self_meta, n, out_meta);
19549 }
19550
19551 at::Tensor self_;
19552 if (at::functionalization::impl::isFunctionalTensor(self)) {
19553 at::functionalization::impl::sync(self);
19554 self_ = at::functionalization::impl::from_functional_tensor(self);
19555 } else {
19556 self_ = self;
19557 }
19558
19559 at::Tensor out_;
19560 if (at::functionalization::impl::isFunctionalTensor(out)) {
19561 at::functionalization::impl::sync(out);
19562 out_ = at::functionalization::impl::from_functional_tensor(out);
19563 } else {
19564 out_ = out;
19565 }
19566 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
19567 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
19568           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19569 TORCH_INTERNAL_ASSERT(false,
19570 "mutating a non-functional tensor with a functional tensor is not allowed.",
19571 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
19572 } else {
19573 // case 2: arguments are not functional tensors, so we no-op and redispatch.
19574 at::AutoDispatchSkipFunctionalize guard;
19575 at::Tensor tmp_output = at::_ops::linalg_matrix_power_out::call(self_, n, out_);
19576           return out;
19577 }
19578 } else {
19579 at::Tensor tmp_output;
19580 {
19581 at::AutoDispatchSkipFunctionalize guard;
19582 tmp_output = at::_ops::linalg_matrix_power::call(self_, n);
19583 }
19584 at::functionalization::impl::replace_(out, tmp_output);
19585 at::functionalization::impl::commit_update(out);
19586 at::functionalization::impl::sync(out);
19587 return out;
19588 }
19589 }
19590
19591 at::Tensor & _make_dual_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & primal, const at::Tensor & tangent, int64_t level, at::Tensor & out) {
19592 if (false) {
19593 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
19594 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
19595         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
19596 auto primal_meta = to_meta(primal);
19597 auto tangent_meta = to_meta(tangent);
19598 auto out_meta = to_meta(out);
19599 at::AutoDispatchSkipFunctionalize func_guard;
19600 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
19601 at::_ops::_make_dual_copy_out::call(primal_meta, tangent_meta, level, out_meta);
19602 }
19603
19604 at::Tensor primal_;
19605 if (at::functionalization::impl::isFunctionalTensor(primal)) {
19606 at::functionalization::impl::sync(primal);
19607 primal_ = at::functionalization::impl::from_functional_tensor(primal);
19608 } else {
19609 primal_ = primal;
19610 }
19611
19612 at::Tensor tangent_;
19613 if (at::functionalization::impl::isFunctionalTensor(tangent)) {
19614 at::functionalization::impl::sync(tangent);
19615 tangent_ = at::functionalization::impl::from_functional_tensor(tangent);
19616 } else {
19617 tangent_ = tangent;
19618 }
19619
19620 at::Tensor out_;
19621 if (at::functionalization::impl::isFunctionalTensor(out)) {
19622 at::functionalization::impl::sync(out);
19623 out_ = at::functionalization::impl::from_functional_tensor(out);
19624 } else {
19625 out_ = out;
19626 }
19627 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
19628 if ((false || at::functionalization::impl::isFunctionalTensor(primal) || at::functionalization::impl::isFunctionalTensor(tangent))) {
19629           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19630 TORCH_INTERNAL_ASSERT(false,
19631 "mutating a non-functional tensor with a functional tensor is not allowed.",
19632 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
19633 } else {
19634 // case 2: arguments are not functional tensors, so we no-op and redispatch.
19635 at::AutoDispatchSkipFunctionalize guard;
19636 at::Tensor tmp_output = at::_ops::_make_dual_copy_out::call(primal_, tangent_, level, out_);
19637           return out;
19638 }
19639 } else {
19640 at::Tensor tmp_output;
19641 {
19642 at::AutoDispatchSkipFunctionalize guard;
19643 tmp_output = at::_ops::_make_dual_copy::call(primal_, tangent_, level);
19644 }
19645 at::functionalization::impl::replace_(out, tmp_output);
19646 at::functionalization::impl::commit_update(out);
19647 at::functionalization::impl::sync(out);
19648 return out;
19649 }
19650 }
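
    // This and the following kernels handle the out= overloads of several *_copy ops
    // (_make_dual_copy, view_as_real_copy, permute_copy, and so on). Since the copy variants already
    // produce fresh, non-aliasing tensors, only the standard unwrap / redispatch / replace_()
    // treatment shown above is needed on these paths.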
19651
19652 at::Tensor & view_as_real_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
19653 if (false) {
19654 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
19655 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
19656         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
19657 auto self_meta = to_meta(self);
19658 auto out_meta = to_meta(out);
19659 at::AutoDispatchSkipFunctionalize func_guard;
19660 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
19661 at::_ops::view_as_real_copy_out::call(self_meta, out_meta);
19662 }
19663
19664 at::Tensor self_;
19665 if (at::functionalization::impl::isFunctionalTensor(self)) {
19666 at::functionalization::impl::sync(self);
19667 self_ = at::functionalization::impl::from_functional_tensor(self);
19668 } else {
19669 self_ = self;
19670 }
19671
19672 at::Tensor out_;
19673 if (at::functionalization::impl::isFunctionalTensor(out)) {
19674 at::functionalization::impl::sync(out);
19675 out_ = at::functionalization::impl::from_functional_tensor(out);
19676 } else {
19677 out_ = out;
19678 }
19679 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
19680 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
19681           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19682 TORCH_INTERNAL_ASSERT(false,
19683 "mutating a non-functional tensor with a functional tensor is not allowed.",
19684 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
19685 } else {
19686 // case 2: arguments are not functional tensors, so we no-op and redispatch.
19687 at::AutoDispatchSkipFunctionalize guard;
19688 at::Tensor tmp_output = at::_ops::view_as_real_copy_out::call(self_, out_);
19689           return out;
19690 }
19691 } else {
19692 at::Tensor tmp_output;
19693 {
19694 at::AutoDispatchSkipFunctionalize guard;
19695 tmp_output = at::_ops::view_as_real_copy::call(self_);
19696 }
19697 at::functionalization::impl::replace_(out, tmp_output);
19698 at::functionalization::impl::commit_update(out);
19699 at::functionalization::impl::sync(out);
19700 return out;
19701 }
19702 }
19703
19704 at::Tensor & view_as_complex_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
19705 if (false) {
19706 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
19707 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
19708         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
19709 auto self_meta = to_meta(self);
19710 auto out_meta = to_meta(out);
19711 at::AutoDispatchSkipFunctionalize func_guard;
19712 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
19713 at::_ops::view_as_complex_copy_out::call(self_meta, out_meta);
19714 }
19715
19716 at::Tensor self_;
19717 if (at::functionalization::impl::isFunctionalTensor(self)) {
19718 at::functionalization::impl::sync(self);
19719 self_ = at::functionalization::impl::from_functional_tensor(self);
19720 } else {
19721 self_ = self;
19722 }
19723
19724 at::Tensor out_;
19725 if (at::functionalization::impl::isFunctionalTensor(out)) {
19726 at::functionalization::impl::sync(out);
19727 out_ = at::functionalization::impl::from_functional_tensor(out);
19728 } else {
19729 out_ = out;
19730 }
19731 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
19732 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
19733           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19734 TORCH_INTERNAL_ASSERT(false,
19735 "mutating a non-functional tensor with a functional tensor is not allowed.",
19736 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
19737 } else {
19738 // case 2: arguments are not functional tensors, so we no-op and redispatch.
19739 at::AutoDispatchSkipFunctionalize guard;
19740 at::Tensor tmp_output = at::_ops::view_as_complex_copy_out::call(self_, out_);
19741           return out;
19742 }
19743 } else {
19744 at::Tensor tmp_output;
19745 {
19746 at::AutoDispatchSkipFunctionalize guard;
19747 tmp_output = at::_ops::view_as_complex_copy::call(self_);
19748 }
19749 at::functionalization::impl::replace_(out, tmp_output);
19750 at::functionalization::impl::commit_update(out);
19751 at::functionalization::impl::sync(out);
19752 return out;
19753 }
19754 }
19755
19756 at::Tensor & _conj_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
19757 if (false) {
19758 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
19759 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
19760         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
19761 auto self_meta = to_meta(self);
19762 auto out_meta = to_meta(out);
19763 at::AutoDispatchSkipFunctionalize func_guard;
19764 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
19765 at::_ops::_conj_copy_out::call(self_meta, out_meta);
19766 }
19767
19768 at::Tensor self_;
19769 if (at::functionalization::impl::isFunctionalTensor(self)) {
19770 at::functionalization::impl::sync(self);
19771 self_ = at::functionalization::impl::from_functional_tensor(self);
19772 } else {
19773 self_ = self;
19774 }
19775
19776 at::Tensor out_;
19777 if (at::functionalization::impl::isFunctionalTensor(out)) {
19778 at::functionalization::impl::sync(out);
19779 out_ = at::functionalization::impl::from_functional_tensor(out);
19780 } else {
19781 out_ = out;
19782 }
19783 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
19784 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
19785           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19786 TORCH_INTERNAL_ASSERT(false,
19787 "mutating a non-functional tensor with a functional tensor is not allowed.",
19788 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
19789 } else {
19790 // case 2: arguments are not functional tensors, so we no-op and redispatch.
19791 at::AutoDispatchSkipFunctionalize guard;
19792 at::Tensor tmp_output = at::_ops::_conj_copy_out::call(self_, out_);
19793           return out;
19794 }
19795 } else {
19796 at::Tensor tmp_output;
19797 {
19798 at::AutoDispatchSkipFunctionalize guard;
19799 tmp_output = at::_ops::_conj_copy::call(self_);
19800 }
19801 at::functionalization::impl::replace_(out, tmp_output);
19802 at::functionalization::impl::commit_update(out);
19803 at::functionalization::impl::sync(out);
19804 return out;
19805 }
19806 }
19807
19808 at::Tensor & _neg_view_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
19809 if (false) {
19810 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
19811 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
19812         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
19813 auto self_meta = to_meta(self);
19814 auto out_meta = to_meta(out);
19815 at::AutoDispatchSkipFunctionalize func_guard;
19816 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
19817 at::_ops::_neg_view_copy_out::call(self_meta, out_meta);
19818 }
19819
19820 at::Tensor self_;
19821 if (at::functionalization::impl::isFunctionalTensor(self)) {
19822 at::functionalization::impl::sync(self);
19823 self_ = at::functionalization::impl::from_functional_tensor(self);
19824 } else {
19825 self_ = self;
19826 }
19827
19828 at::Tensor out_;
19829 if (at::functionalization::impl::isFunctionalTensor(out)) {
19830 at::functionalization::impl::sync(out);
19831 out_ = at::functionalization::impl::from_functional_tensor(out);
19832 } else {
19833 out_ = out;
19834 }
19835 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
19836 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
19837           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19838 TORCH_INTERNAL_ASSERT(false,
19839 "mutating a non-functional tensor with a functional tensor is not allowed.",
19840 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
19841 } else {
19842 // case 2: arguments are not functional tensors, so we no-op and redispatch.
19843 at::AutoDispatchSkipFunctionalize guard;
19844 at::Tensor tmp_output = at::_ops::_neg_view_copy_out::call(self_, out_);
19845           return out;
19846 }
19847 } else {
19848 at::Tensor tmp_output;
19849 {
19850 at::AutoDispatchSkipFunctionalize guard;
19851 tmp_output = at::_ops::_neg_view_copy::call(self_);
19852 }
19853 at::functionalization::impl::replace_(out, tmp_output);
19854 at::functionalization::impl::commit_update(out);
19855 at::functionalization::impl::sync(out);
19856 return out;
19857 }
19858 }
19859
19860 at::Tensor & permute_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out) {
19861 if (false) {
19862 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
19863 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
19864         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
19865 auto self_meta = to_meta(self);
19866 auto out_meta = to_meta(out);
19867 at::AutoDispatchSkipFunctionalize func_guard;
19868 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
19869 at::_ops::permute_copy_out::call(self_meta, dims, out_meta);
19870 }
19871
19872 at::Tensor self_;
19873 if (at::functionalization::impl::isFunctionalTensor(self)) {
19874 at::functionalization::impl::sync(self);
19875 self_ = at::functionalization::impl::from_functional_tensor(self);
19876 } else {
19877 self_ = self;
19878 }
19879
19880 at::Tensor out_;
19881 if (at::functionalization::impl::isFunctionalTensor(out)) {
19882 at::functionalization::impl::sync(out);
19883 out_ = at::functionalization::impl::from_functional_tensor(out);
19884 } else {
19885 out_ = out;
19886 }
19887 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
19888 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
19889           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19890 TORCH_INTERNAL_ASSERT(false,
19891 "mutating a non-functional tensor with a functional tensor is not allowed.",
19892 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
19893 } else {
19894 // case 2: arguments are not functional tensors, so we no-op and redispatch.
19895 at::AutoDispatchSkipFunctionalize guard;
19896 at::Tensor tmp_output = at::_ops::permute_copy_out::call(self_, dims, out_);
19897           return out;
19898 }
19899 } else {
19900 at::Tensor tmp_output;
19901 {
19902 at::AutoDispatchSkipFunctionalize guard;
19903 tmp_output = at::_ops::permute_copy::call(self_, dims);
19904 }
19905 at::functionalization::impl::replace_(out, tmp_output);
19906 at::functionalization::impl::commit_update(out);
19907 at::functionalization::impl::sync(out);
19908 return out;
19909 }
19910 }
19911
19912 void split_copy_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) {
19913 if (false) {
19914 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
19915 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
19916         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
19917 auto self_meta = to_meta(self);
19918 auto out_meta = to_meta(out);
19919 at::AutoDispatchSkipFunctionalize func_guard;
19920 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
19921 at::_ops::split_copy_Tensor_out::call(self_meta, split_size, dim, out_meta);
19922 }
19923
19924 at::Tensor self_;
19925 if (at::functionalization::impl::isFunctionalTensor(self)) {
19926 at::functionalization::impl::sync(self);
19927 self_ = at::functionalization::impl::from_functional_tensor(self);
19928 } else {
19929 self_ = self;
19930 }
19931
19932 ::std::vector<at::Tensor> out_;
19933 if (at::functionalization::impl::isFunctionalTensor(out)) {
19934 at::functionalization::impl::sync(out);
19935 out_ = at::functionalization::impl::from_functional_tensor(out);
19936 } else {
19937 out_ = out.vec();
19938 }
19939 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
19940 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
19941           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19942 TORCH_INTERNAL_ASSERT(false,
19943 "mutating a non-functional tensor with a functional tensor is not allowed.",
19944 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
19945 } else {
19946 // case 2: arguments are not functional tensors, so we no-op and redispatch.
19947 at::AutoDispatchSkipFunctionalize guard;
19948           at::_ops::split_copy_Tensor_out::call(self_, split_size, dim, out_);
19950 }
19951 } else {
19952 ::std::vector<at::Tensor> tmp_output;
19953 {
19954 at::AutoDispatchSkipFunctionalize guard;
19955 tmp_output = at::_ops::split_copy_Tensor::call(self_, split_size, dim);
19956 }
19957 at::functionalization::impl::replace_(out, tmp_output);
19958 at::functionalization::impl::commit_update(out);
19959 at::functionalization::impl::sync(out);
19960
19961 }
19962 }
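
    // split_copy.Tensor_out is the TensorList-output case: the kernel returns void, `out` is unwrapped
    // into a ::std::vector<at::Tensor> (through the list overload of from_functional_tensor(), or
    // out.vec() when nothing is functional), and replace_()/commit_update()/sync() are applied to the
    // whole at::TensorList rather than to a single tensor, which is why there is no return value to
    // forward.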
19963
19964 at::Tensor & t_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
19965 if (false) {
19966 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
19967 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
19968         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
19969 auto self_meta = to_meta(self);
19970 auto out_meta = to_meta(out);
19971 at::AutoDispatchSkipFunctionalize func_guard;
19972 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
19973 at::_ops::t_copy_out::call(self_meta, out_meta);
19974 }
19975
19976 at::Tensor self_;
19977 if (at::functionalization::impl::isFunctionalTensor(self)) {
19978 at::functionalization::impl::sync(self);
19979 self_ = at::functionalization::impl::from_functional_tensor(self);
19980 } else {
19981 self_ = self;
19982 }
19983
19984 at::Tensor out_;
19985 if (at::functionalization::impl::isFunctionalTensor(out)) {
19986 at::functionalization::impl::sync(out);
19987 out_ = at::functionalization::impl::from_functional_tensor(out);
19988 } else {
19989 out_ = out;
19990 }
19991 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
19992 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
19993           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
19994 TORCH_INTERNAL_ASSERT(false,
19995 "mutating a non-functional tensor with a functional tensor is not allowed.",
19996 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
19997 } else {
19998 // case 2: arguments are not functional tensors, so we no-op and redispatch.
19999 at::AutoDispatchSkipFunctionalize guard;
20000 at::Tensor tmp_output = at::_ops::t_copy_out::call(self_, out_);
20001           return out;
20002 }
20003 } else {
20004 at::Tensor tmp_output;
20005 {
20006 at::AutoDispatchSkipFunctionalize guard;
20007 tmp_output = at::_ops::t_copy::call(self_);
20008 }
20009 at::functionalization::impl::replace_(out, tmp_output);
20010 at::functionalization::impl::commit_update(out);
20011 at::functionalization::impl::sync(out);
20012 return out;
20013 }
20014 }
20015
20016 at::Tensor & ccol_indices_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
20017 if (false) {
20018 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
20019 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
20020         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
20021 auto self_meta = to_meta(self);
20022 auto out_meta = to_meta(out);
20023 at::AutoDispatchSkipFunctionalize func_guard;
20024 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
20025 at::_ops::ccol_indices_copy_out::call(self_meta, out_meta);
20026 }
20027
20028 at::Tensor self_;
20029 if (at::functionalization::impl::isFunctionalTensor(self)) {
20030 at::functionalization::impl::sync(self);
20031 self_ = at::functionalization::impl::from_functional_tensor(self);
20032 } else {
20033 self_ = self;
20034 }
20035
20036 at::Tensor out_;
20037 if (at::functionalization::impl::isFunctionalTensor(out)) {
20038 at::functionalization::impl::sync(out);
20039 out_ = at::functionalization::impl::from_functional_tensor(out);
20040 } else {
20041 out_ = out;
20042 }
20043 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
20044 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
20045           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
20046 TORCH_INTERNAL_ASSERT(false,
20047 "mutating a non-functional tensor with a functional tensor is not allowed.",
20048 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
20049 } else {
20050 // case 2: arguments are not functional tensors, so we no-op and redispatch.
20051 at::AutoDispatchSkipFunctionalize guard;
20052 at::Tensor tmp_output = at::_ops::ccol_indices_copy_out::call(self_, out_);
20053           return out;
20054 }
20055 } else {
20056 at::Tensor tmp_output;
20057 {
20058 at::AutoDispatchSkipFunctionalize guard;
20059 tmp_output = at::_ops::ccol_indices_copy::call(self_);
20060 }
20061 at::functionalization::impl::replace_(out, tmp_output);
20062 at::functionalization::impl::commit_update(out);
20063 at::functionalization::impl::sync(out);
20064 return out;
20065 }
20066 }
20067
20068 at::Tensor & unfold_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dimension, int64_t size, int64_t step, at::Tensor & out) {
20069 if (false) {
20070 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
20071 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
20072         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
20073 auto self_meta = to_meta(self);
20074 auto out_meta = to_meta(out);
20075 at::AutoDispatchSkipFunctionalize func_guard;
20076 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
20077 at::_ops::unfold_copy_out::call(self_meta, dimension, size, step, out_meta);
20078 }
20079
20080 at::Tensor self_;
20081 if (at::functionalization::impl::isFunctionalTensor(self)) {
20082 at::functionalization::impl::sync(self);
20083 self_ = at::functionalization::impl::from_functional_tensor(self);
20084 } else {
20085 self_ = self;
20086 }
20087
20088 at::Tensor out_;
20089 if (at::functionalization::impl::isFunctionalTensor(out)) {
20090 at::functionalization::impl::sync(out);
20091 out_ = at::functionalization::impl::from_functional_tensor(out);
20092 } else {
20093 out_ = out;
20094 }
20095 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
20096 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
20097           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
20098 TORCH_INTERNAL_ASSERT(false,
20099 "mutating a non-functional tensor with a functional tensor is not allowed.",
20100 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
20101 } else {
20102 // case 2: arguments are not functional tensors, so we no-op and redispatch.
20103 at::AutoDispatchSkipFunctionalize guard;
20104 at::Tensor tmp_output = at::_ops::unfold_copy_out::call(self_, dimension, size, step, out_);
20105           return out;
20106 }
20107 } else {
20108 at::Tensor tmp_output;
20109 {
20110 at::AutoDispatchSkipFunctionalize guard;
20111 tmp_output = at::_ops::unfold_copy::call(self_, dimension, size, step);
20112 }
20113 at::functionalization::impl::replace_(out, tmp_output);
20114 at::functionalization::impl::commit_update(out);
20115 at::functionalization::impl::sync(out);
20116 return out;
20117 }
20118 }
20119
20120 at::Tensor & _transformer_encoder_layer_fwd_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, c10::optional<int64_t> mask_type, at::Tensor & out) {
20121 if (false) {
20122 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
20123 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
20124         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
20125 auto src_meta = to_meta(src);
20126 auto qkv_weight_meta = to_meta(qkv_weight);
20127 auto qkv_bias_meta = to_meta(qkv_bias);
20128 auto proj_weight_meta = to_meta(proj_weight);
20129 auto proj_bias_meta = to_meta(proj_bias);
20130 auto norm_weight_1_meta = to_meta(norm_weight_1);
20131 auto norm_bias_1_meta = to_meta(norm_bias_1);
20132 auto norm_weight_2_meta = to_meta(norm_weight_2);
20133 auto norm_bias_2_meta = to_meta(norm_bias_2);
20134 auto ffn_weight_1_meta = to_meta(ffn_weight_1);
20135 auto ffn_bias_1_meta = to_meta(ffn_bias_1);
20136 auto ffn_weight_2_meta = to_meta(ffn_weight_2);
20137 auto ffn_bias_2_meta = to_meta(ffn_bias_2);
20138 auto mask_meta = to_meta(mask);
20139 auto out_meta = to_meta(out);
20140 at::AutoDispatchSkipFunctionalize func_guard;
20141 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
20142 at::_ops::_transformer_encoder_layer_fwd_out::call(src_meta, embed_dim, num_heads, qkv_weight_meta, qkv_bias_meta, proj_weight_meta, proj_bias_meta, use_gelu, norm_first, eps, norm_weight_1_meta, norm_bias_1_meta, norm_weight_2_meta, norm_bias_2_meta, ffn_weight_1_meta, ffn_bias_1_meta, ffn_weight_2_meta, ffn_bias_2_meta, mask_meta, mask_type, out_meta);
20143 }
20144
20145 at::Tensor src_;
20146 if (at::functionalization::impl::isFunctionalTensor(src)) {
20147 at::functionalization::impl::sync(src);
20148 src_ = at::functionalization::impl::from_functional_tensor(src);
20149 } else {
20150 src_ = src;
20151 }
20152
20153 at::Tensor qkv_weight_;
20154 if (at::functionalization::impl::isFunctionalTensor(qkv_weight)) {
20155 at::functionalization::impl::sync(qkv_weight);
20156 qkv_weight_ = at::functionalization::impl::from_functional_tensor(qkv_weight);
20157 } else {
20158 qkv_weight_ = qkv_weight;
20159 }
20160
20161 at::Tensor qkv_bias_;
20162 if (at::functionalization::impl::isFunctionalTensor(qkv_bias)) {
20163 at::functionalization::impl::sync(qkv_bias);
20164 qkv_bias_ = at::functionalization::impl::from_functional_tensor(qkv_bias);
20165 } else {
20166 qkv_bias_ = qkv_bias;
20167 }
20168
20169 at::Tensor proj_weight_;
20170 if (at::functionalization::impl::isFunctionalTensor(proj_weight)) {
20171 at::functionalization::impl::sync(proj_weight);
20172 proj_weight_ = at::functionalization::impl::from_functional_tensor(proj_weight);
20173 } else {
20174 proj_weight_ = proj_weight;
20175 }
20176
20177 at::Tensor proj_bias_;
20178 if (at::functionalization::impl::isFunctionalTensor(proj_bias)) {
20179 at::functionalization::impl::sync(proj_bias);
20180 proj_bias_ = at::functionalization::impl::from_functional_tensor(proj_bias);
20181 } else {
20182 proj_bias_ = proj_bias;
20183 }
20184
20185 at::Tensor norm_weight_1_;
20186 if (at::functionalization::impl::isFunctionalTensor(norm_weight_1)) {
20187 at::functionalization::impl::sync(norm_weight_1);
20188 norm_weight_1_ = at::functionalization::impl::from_functional_tensor(norm_weight_1);
20189 } else {
20190 norm_weight_1_ = norm_weight_1;
20191 }
20192
20193 at::Tensor norm_bias_1_;
20194 if (at::functionalization::impl::isFunctionalTensor(norm_bias_1)) {
20195 at::functionalization::impl::sync(norm_bias_1);
20196 norm_bias_1_ = at::functionalization::impl::from_functional_tensor(norm_bias_1);
20197 } else {
20198 norm_bias_1_ = norm_bias_1;
20199 }
20200
20201 at::Tensor norm_weight_2_;
20202 if (at::functionalization::impl::isFunctionalTensor(norm_weight_2)) {
20203 at::functionalization::impl::sync(norm_weight_2);
20204 norm_weight_2_ = at::functionalization::impl::from_functional_tensor(norm_weight_2);
20205 } else {
20206 norm_weight_2_ = norm_weight_2;
20207 }
20208
20209 at::Tensor norm_bias_2_;
20210 if (at::functionalization::impl::isFunctionalTensor(norm_bias_2)) {
20211 at::functionalization::impl::sync(norm_bias_2);
20212 norm_bias_2_ = at::functionalization::impl::from_functional_tensor(norm_bias_2);
20213 } else {
20214 norm_bias_2_ = norm_bias_2;
20215 }
20216
20217 at::Tensor ffn_weight_1_;
20218 if (at::functionalization::impl::isFunctionalTensor(ffn_weight_1)) {
20219 at::functionalization::impl::sync(ffn_weight_1);
20220 ffn_weight_1_ = at::functionalization::impl::from_functional_tensor(ffn_weight_1);
20221 } else {
20222 ffn_weight_1_ = ffn_weight_1;
20223 }
20224
20225 at::Tensor ffn_bias_1_;
20226 if (at::functionalization::impl::isFunctionalTensor(ffn_bias_1)) {
20227 at::functionalization::impl::sync(ffn_bias_1);
20228 ffn_bias_1_ = at::functionalization::impl::from_functional_tensor(ffn_bias_1);
20229 } else {
20230 ffn_bias_1_ = ffn_bias_1;
20231 }
20232
20233 at::Tensor ffn_weight_2_;
20234 if (at::functionalization::impl::isFunctionalTensor(ffn_weight_2)) {
20235 at::functionalization::impl::sync(ffn_weight_2);
20236 ffn_weight_2_ = at::functionalization::impl::from_functional_tensor(ffn_weight_2);
20237 } else {
20238 ffn_weight_2_ = ffn_weight_2;
20239 }
20240
20241 at::Tensor ffn_bias_2_;
20242 if (at::functionalization::impl::isFunctionalTensor(ffn_bias_2)) {
20243 at::functionalization::impl::sync(ffn_bias_2);
20244 ffn_bias_2_ = at::functionalization::impl::from_functional_tensor(ffn_bias_2);
20245 } else {
20246 ffn_bias_2_ = ffn_bias_2;
20247 }
20248
20249 c10::optional<at::Tensor> mask_;
20250 if (at::functionalization::impl::isFunctionalTensor(mask)) {
20251 at::functionalization::impl::sync(mask);
20252 mask_ = at::functionalization::impl::from_functional_tensor(mask);
20253 } else {
20254 mask_ = mask;
20255 }
20256
20257 at::Tensor out_;
20258 if (at::functionalization::impl::isFunctionalTensor(out)) {
20259 at::functionalization::impl::sync(out);
20260 out_ = at::functionalization::impl::from_functional_tensor(out);
20261 } else {
20262 out_ = out;
20263 }
20264 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
20265 if ((false || at::functionalization::impl::isFunctionalTensor(src) || at::functionalization::impl::isFunctionalTensor(qkv_weight) || at::functionalization::impl::isFunctionalTensor(qkv_bias) || at::functionalization::impl::isFunctionalTensor(proj_weight) || at::functionalization::impl::isFunctionalTensor(proj_bias) || at::functionalization::impl::isFunctionalTensor(norm_weight_1) || at::functionalization::impl::isFunctionalTensor(norm_bias_1) || at::functionalization::impl::isFunctionalTensor(norm_weight_2) || at::functionalization::impl::isFunctionalTensor(norm_bias_2) || at::functionalization::impl::isFunctionalTensor(ffn_weight_1) || at::functionalization::impl::isFunctionalTensor(ffn_bias_1) || at::functionalization::impl::isFunctionalTensor(ffn_weight_2) || at::functionalization::impl::isFunctionalTensor(ffn_bias_2) || at::functionalization::impl::isFunctionalTensor(mask))) {
20266           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
20267 TORCH_INTERNAL_ASSERT(false,
20268 "mutating a non-functional tensor with a functional tensor is not allowed.",
20269 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
20270 } else {
20271 // case 2: arguments are not functional tensors, so we no-op and redispatch.
20272 at::AutoDispatchSkipFunctionalize guard;
20273 at::Tensor tmp_output = at::_ops::_transformer_encoder_layer_fwd_out::call(src_, embed_dim, num_heads, qkv_weight_, qkv_bias_, proj_weight_, proj_bias_, use_gelu, norm_first, eps, norm_weight_1_, norm_bias_1_, norm_weight_2_, norm_bias_2_, ffn_weight_1_, ffn_bias_1_, ffn_weight_2_, ffn_bias_2_, mask_, mask_type, out_);
20274           return out;
20275 }
20276 } else {
20277 at::Tensor tmp_output;
20278 {
20279 at::AutoDispatchSkipFunctionalize guard;
20280 tmp_output = at::_ops::_transformer_encoder_layer_fwd::call(src_, embed_dim, num_heads, qkv_weight_, qkv_bias_, proj_weight_, proj_bias_, use_gelu, norm_first, eps, norm_weight_1_, norm_bias_1_, norm_weight_2_, norm_bias_2_, ffn_weight_1_, ffn_bias_1_, ffn_weight_2_, ffn_bias_2_, mask_, mask_type);
20281 }
20282 at::functionalization::impl::replace_(out, tmp_output);
20283 at::functionalization::impl::commit_update(out);
20284 at::functionalization::impl::sync(out);
20285 return out;
20286 }
20287 }
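
    // Optional tensor arguments (here `mask`) go through the same machinery: isFunctionalTensor(),
    // sync(), and from_functional_tensor() are invoked on the c10::optional<at::Tensor> directly, and
    // the optional participates in the case-1 check like any required input. Only the mutated `out`
    // argument(s) receive the replace_()/commit_update()/sync() write-back.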
20288
20289 ::std::tuple<at::Tensor &,at::Tensor &> _native_multi_head_attention_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, bool need_weights, bool average_attn_weights, c10::optional<int64_t> mask_type, at::Tensor & out0, at::Tensor & out1) {
20290 if (false) {
20291 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
20292 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
20293         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
20294 auto query_meta = to_meta(query);
20295 auto key_meta = to_meta(key);
20296 auto value_meta = to_meta(value);
20297 auto qkv_weight_meta = to_meta(qkv_weight);
20298 auto qkv_bias_meta = to_meta(qkv_bias);
20299 auto proj_weight_meta = to_meta(proj_weight);
20300 auto proj_bias_meta = to_meta(proj_bias);
20301 auto mask_meta = to_meta(mask);
20302 auto out0_meta = to_meta(out0);
20303 auto out1_meta = to_meta(out1);
20304 at::AutoDispatchSkipFunctionalize func_guard;
20305 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
20306 at::_ops::_native_multi_head_attention_out::call(query_meta, key_meta, value_meta, embed_dim, num_head, qkv_weight_meta, qkv_bias_meta, proj_weight_meta, proj_bias_meta, mask_meta, need_weights, average_attn_weights, mask_type, out0_meta, out1_meta);
20307 }
20308
20309 at::Tensor query_;
20310 if (at::functionalization::impl::isFunctionalTensor(query)) {
20311 at::functionalization::impl::sync(query);
20312 query_ = at::functionalization::impl::from_functional_tensor(query);
20313 } else {
20314 query_ = query;
20315 }
20316
20317 at::Tensor key_;
20318 if (at::functionalization::impl::isFunctionalTensor(key)) {
20319 at::functionalization::impl::sync(key);
20320 key_ = at::functionalization::impl::from_functional_tensor(key);
20321 } else {
20322 key_ = key;
20323 }
20324
20325 at::Tensor value_;
20326 if (at::functionalization::impl::isFunctionalTensor(value)) {
20327 at::functionalization::impl::sync(value);
20328 value_ = at::functionalization::impl::from_functional_tensor(value);
20329 } else {
20330 value_ = value;
20331 }
20332
20333 at::Tensor qkv_weight_;
20334 if (at::functionalization::impl::isFunctionalTensor(qkv_weight)) {
20335 at::functionalization::impl::sync(qkv_weight);
20336 qkv_weight_ = at::functionalization::impl::from_functional_tensor(qkv_weight);
20337 } else {
20338 qkv_weight_ = qkv_weight;
20339 }
20340
20341 at::Tensor qkv_bias_;
20342 if (at::functionalization::impl::isFunctionalTensor(qkv_bias)) {
20343 at::functionalization::impl::sync(qkv_bias);
20344 qkv_bias_ = at::functionalization::impl::from_functional_tensor(qkv_bias);
20345 } else {
20346 qkv_bias_ = qkv_bias;
20347 }
20348
20349 at::Tensor proj_weight_;
20350 if (at::functionalization::impl::isFunctionalTensor(proj_weight)) {
20351 at::functionalization::impl::sync(proj_weight);
20352 proj_weight_ = at::functionalization::impl::from_functional_tensor(proj_weight);
20353 } else {
20354 proj_weight_ = proj_weight;
20355 }
20356
20357 at::Tensor proj_bias_;
20358 if (at::functionalization::impl::isFunctionalTensor(proj_bias)) {
20359 at::functionalization::impl::sync(proj_bias);
20360 proj_bias_ = at::functionalization::impl::from_functional_tensor(proj_bias);
20361 } else {
20362 proj_bias_ = proj_bias;
20363 }
20364
20365 c10::optional<at::Tensor> mask_;
20366 if (at::functionalization::impl::isFunctionalTensor(mask)) {
20367 at::functionalization::impl::sync(mask);
20368 mask_ = at::functionalization::impl::from_functional_tensor(mask);
20369 } else {
20370 mask_ = mask;
20371 }
20372
20373 at::Tensor out0_;
20374 if (at::functionalization::impl::isFunctionalTensor(out0)) {
20375 at::functionalization::impl::sync(out0);
20376 out0_ = at::functionalization::impl::from_functional_tensor(out0);
20377 } else {
20378 out0_ = out0;
20379 }
20380
20381 at::Tensor out1_;
20382 if (at::functionalization::impl::isFunctionalTensor(out1)) {
20383 at::functionalization::impl::sync(out1);
20384 out1_ = at::functionalization::impl::from_functional_tensor(out1);
20385 } else {
20386 out1_ = out1;
20387 }
20388 if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
20389 if ((false || at::functionalization::impl::isFunctionalTensor(query) || at::functionalization::impl::isFunctionalTensor(key) || at::functionalization::impl::isFunctionalTensor(value) || at::functionalization::impl::isFunctionalTensor(qkv_weight) || at::functionalization::impl::isFunctionalTensor(qkv_bias) || at::functionalization::impl::isFunctionalTensor(proj_weight) || at::functionalization::impl::isFunctionalTensor(proj_bias) || at::functionalization::impl::isFunctionalTensor(mask))) {
20390           // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
20391 TORCH_INTERNAL_ASSERT(false,
20392 "mutating a non-functional tensor with a functional tensor is not allowed.",
20393 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
20394 } else {
20395 // case 2: arguments are not functional tensors, so we no-op and redispatch.
20396 at::AutoDispatchSkipFunctionalize guard;
20397 ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_native_multi_head_attention_out::call(query_, key_, value_, embed_dim, num_head, qkv_weight_, qkv_bias_, proj_weight_, proj_bias_, mask_, need_weights, average_attn_weights, mask_type, out0_, out1_);
20398           return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
20399 }
20400 } else {
20401 ::std::tuple<at::Tensor,at::Tensor> tmp_output;
20402 {
20403 at::AutoDispatchSkipFunctionalize guard;
20404 tmp_output = at::_ops::_native_multi_head_attention::call(query_, key_, value_, embed_dim, num_head, qkv_weight_, qkv_bias_, proj_weight_, proj_bias_, mask_, need_weights, average_attn_weights, mask_type);
20405 }
20406 at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
20407 at::functionalization::impl::commit_update(out0);
20408 at::functionalization::impl::sync(out0);
20409 at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
20410 at::functionalization::impl::commit_update(out1);
20411 at::functionalization::impl::sync(out1);
20412 return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
20413 }
20414 }
20415
20416 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _transformer_decoder_only_layer_fwd_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, const c10::optional<at::Tensor> & incr_key, const c10::optional<at::Tensor> & incr_value, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
20417 if (false) {
20418 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
20419 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
20420         // (We can only do this for inplace ops today though, because they technically all support meta tensors).
20421 auto src_meta = to_meta(src);
20422 auto qkv_weight_meta = to_meta(qkv_weight);
20423 auto qkv_bias_meta = to_meta(qkv_bias);
20424 auto proj_weight_meta = to_meta(proj_weight);
20425 auto proj_bias_meta = to_meta(proj_bias);
20426 auto norm_weight_1_meta = to_meta(norm_weight_1);
20427 auto norm_bias_1_meta = to_meta(norm_bias_1);
20428 auto norm_weight_2_meta = to_meta(norm_weight_2);
20429 auto norm_bias_2_meta = to_meta(norm_bias_2);
20430 auto ffn_weight_1_meta = to_meta(ffn_weight_1);
20431 auto ffn_bias_1_meta = to_meta(ffn_bias_1);
20432 auto ffn_weight_2_meta = to_meta(ffn_weight_2);
20433 auto ffn_bias_2_meta = to_meta(ffn_bias_2);
20434 auto mask_meta = to_meta(mask);
20435 auto incr_key_meta = to_meta(incr_key);
20436 auto incr_value_meta = to_meta(incr_value);
20437 auto out0_meta = to_meta(out0);
20438 auto out1_meta = to_meta(out1);
20439 auto out2_meta = to_meta(out2);
20440 at::AutoDispatchSkipFunctionalize func_guard;
20441 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
20442 at::_ops::_transformer_decoder_only_layer_fwd_out::call(src_meta, embed_dim, num_heads, qkv_weight_meta, qkv_bias_meta, proj_weight_meta, proj_bias_meta, use_gelu, norm_first, eps, norm_weight_1_meta, norm_bias_1_meta, norm_weight_2_meta, norm_bias_2_meta, ffn_weight_1_meta, ffn_bias_1_meta, ffn_weight_2_meta, ffn_bias_2_meta, mask_meta, incr_key_meta, incr_value_meta, out0_meta, out1_meta, out2_meta);
20443 }
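  // NB: the meta-tensor pre-check above is emitted behind `if (false)`, so it
  // never runs for this op; only the argument unwrapping and redispatch logic
  // below is actually executed.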
20444
20445 at::Tensor src_;
20446 if (at::functionalization::impl::isFunctionalTensor(src)) {
20447 at::functionalization::impl::sync(src);
20448 src_ = at::functionalization::impl::from_functional_tensor(src);
20449 } else {
20450 src_ = src;
20451 }
20452
20453 at::Tensor qkv_weight_;
20454 if (at::functionalization::impl::isFunctionalTensor(qkv_weight)) {
20455 at::functionalization::impl::sync(qkv_weight);
20456 qkv_weight_ = at::functionalization::impl::from_functional_tensor(qkv_weight);
20457 } else {
20458 qkv_weight_ = qkv_weight;
20459 }
20460
20461 at::Tensor qkv_bias_;
20462 if (at::functionalization::impl::isFunctionalTensor(qkv_bias)) {
20463 at::functionalization::impl::sync(qkv_bias);
20464 qkv_bias_ = at::functionalization::impl::from_functional_tensor(qkv_bias);
20465 } else {
20466 qkv_bias_ = qkv_bias;
20467 }
20468
20469 at::Tensor proj_weight_;
20470 if (at::functionalization::impl::isFunctionalTensor(proj_weight)) {
20471 at::functionalization::impl::sync(proj_weight);
20472 proj_weight_ = at::functionalization::impl::from_functional_tensor(proj_weight);
20473 } else {
20474 proj_weight_ = proj_weight;
20475 }
20476
20477 at::Tensor proj_bias_;
20478 if (at::functionalization::impl::isFunctionalTensor(proj_bias)) {
20479 at::functionalization::impl::sync(proj_bias);
20480 proj_bias_ = at::functionalization::impl::from_functional_tensor(proj_bias);
20481 } else {
20482 proj_bias_ = proj_bias;
20483 }
20484
20485 at::Tensor norm_weight_1_;
20486 if (at::functionalization::impl::isFunctionalTensor(norm_weight_1)) {
20487 at::functionalization::impl::sync(norm_weight_1);
20488 norm_weight_1_ = at::functionalization::impl::from_functional_tensor(norm_weight_1);
20489 } else {
20490 norm_weight_1_ = norm_weight_1;
20491 }
20492
20493 at::Tensor norm_bias_1_;
20494 if (at::functionalization::impl::isFunctionalTensor(norm_bias_1)) {
20495 at::functionalization::impl::sync(norm_bias_1);
20496 norm_bias_1_ = at::functionalization::impl::from_functional_tensor(norm_bias_1);
20497 } else {
20498 norm_bias_1_ = norm_bias_1;
20499 }
20500
20501 at::Tensor norm_weight_2_;
20502 if (at::functionalization::impl::isFunctionalTensor(norm_weight_2)) {
20503 at::functionalization::impl::sync(norm_weight_2);
20504 norm_weight_2_ = at::functionalization::impl::from_functional_tensor(norm_weight_2);
20505 } else {
20506 norm_weight_2_ = norm_weight_2;
20507 }
20508
20509 at::Tensor norm_bias_2_;
20510 if (at::functionalization::impl::isFunctionalTensor(norm_bias_2)) {
20511 at::functionalization::impl::sync(norm_bias_2);
20512 norm_bias_2_ = at::functionalization::impl::from_functional_tensor(norm_bias_2);
20513 } else {
20514 norm_bias_2_ = norm_bias_2;
20515 }
20516
20517 at::Tensor ffn_weight_1_;
20518 if (at::functionalization::impl::isFunctionalTensor(ffn_weight_1)) {
20519 at::functionalization::impl::sync(ffn_weight_1);
20520 ffn_weight_1_ = at::functionalization::impl::from_functional_tensor(ffn_weight_1);
20521 } else {
20522 ffn_weight_1_ = ffn_weight_1;
20523 }
20524
20525 at::Tensor ffn_bias_1_;
20526 if (at::functionalization::impl::isFunctionalTensor(ffn_bias_1)) {
20527 at::functionalization::impl::sync(ffn_bias_1);
20528 ffn_bias_1_ = at::functionalization::impl::from_functional_tensor(ffn_bias_1);
20529 } else {
20530 ffn_bias_1_ = ffn_bias_1;
20531 }
20532
20533 at::Tensor ffn_weight_2_;
20534 if (at::functionalization::impl::isFunctionalTensor(ffn_weight_2)) {
20535 at::functionalization::impl::sync(ffn_weight_2);
20536 ffn_weight_2_ = at::functionalization::impl::from_functional_tensor(ffn_weight_2);
20537 } else {
20538 ffn_weight_2_ = ffn_weight_2;
20539 }
20540
20541 at::Tensor ffn_bias_2_;
20542 if (at::functionalization::impl::isFunctionalTensor(ffn_bias_2)) {
20543 at::functionalization::impl::sync(ffn_bias_2);
20544 ffn_bias_2_ = at::functionalization::impl::from_functional_tensor(ffn_bias_2);
20545 } else {
20546 ffn_bias_2_ = ffn_bias_2;
20547 }
20548
20549 c10::optional<at::Tensor> mask_;
20550 if (at::functionalization::impl::isFunctionalTensor(mask)) {
20551 at::functionalization::impl::sync(mask);
20552 mask_ = at::functionalization::impl::from_functional_tensor(mask);
20553 } else {
20554 mask_ = mask;
20555 }
20556
20557 c10::optional<at::Tensor> incr_key_;
20558 if (at::functionalization::impl::isFunctionalTensor(incr_key)) {
20559 at::functionalization::impl::sync(incr_key);
20560 incr_key_ = at::functionalization::impl::from_functional_tensor(incr_key);
20561 } else {
20562 incr_key_ = incr_key;
20563 }
20564
20565 c10::optional<at::Tensor> incr_value_;
20566 if (at::functionalization::impl::isFunctionalTensor(incr_value)) {
20567 at::functionalization::impl::sync(incr_value);
20568 incr_value_ = at::functionalization::impl::from_functional_tensor(incr_value);
20569 } else {
20570 incr_value_ = incr_value;
20571 }
20572
20573 at::Tensor out0_;
20574 if (at::functionalization::impl::isFunctionalTensor(out0)) {
20575 at::functionalization::impl::sync(out0);
20576 out0_ = at::functionalization::impl::from_functional_tensor(out0);
20577 } else {
20578 out0_ = out0;
20579 }
20580
20581 at::Tensor out1_;
20582 if (at::functionalization::impl::isFunctionalTensor(out1)) {
20583 at::functionalization::impl::sync(out1);
20584 out1_ = at::functionalization::impl::from_functional_tensor(out1);
20585 } else {
20586 out1_ = out1;
20587 }
20588
20589 at::Tensor out2_;
20590 if (at::functionalization::impl::isFunctionalTensor(out2)) {
20591 at::functionalization::impl::sync(out2);
20592 out2_ = at::functionalization::impl::from_functional_tensor(out2);
20593 } else {
20594 out2_ = out2;
20595 }
20596 if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
20597 if ((false || at::functionalization::impl::isFunctionalTensor(src) || at::functionalization::impl::isFunctionalTensor(qkv_weight) || at::functionalization::impl::isFunctionalTensor(qkv_bias) || at::functionalization::impl::isFunctionalTensor(proj_weight) || at::functionalization::impl::isFunctionalTensor(proj_bias) || at::functionalization::impl::isFunctionalTensor(norm_weight_1) || at::functionalization::impl::isFunctionalTensor(norm_bias_1) || at::functionalization::impl::isFunctionalTensor(norm_weight_2) || at::functionalization::impl::isFunctionalTensor(norm_bias_2) || at::functionalization::impl::isFunctionalTensor(ffn_weight_1) || at::functionalization::impl::isFunctionalTensor(ffn_bias_1) || at::functionalization::impl::isFunctionalTensor(ffn_weight_2) || at::functionalization::impl::isFunctionalTensor(ffn_bias_2) || at::functionalization::impl::isFunctionalTensor(mask) || at::functionalization::impl::isFunctionalTensor(incr_key) || at::functionalization::impl::isFunctionalTensor(incr_value))) {
20598 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
20599 TORCH_INTERNAL_ASSERT(false,
20600 "mutating a non-functional tensor with a functional tensor is not allowed.",
20601 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
20602 } else {
20603 // case 2: arguments are not functional tensors, so we no-op and redispatch.
20604 at::AutoDispatchSkipFunctionalize guard;
20605 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_transformer_decoder_only_layer_fwd_out::call(src_, embed_dim, num_heads, qkv_weight_, qkv_bias_, proj_weight_, proj_bias_, use_gelu, norm_first, eps, norm_weight_1_, norm_bias_1_, norm_weight_2_, norm_bias_2_, ffn_weight_1_, ffn_bias_1_, ffn_weight_2_, ffn_bias_2_, mask_, incr_key_, incr_value_, out0_, out1_, out2_);
20606 return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
20607 }
20608 } else {
20609 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
20610 {
20611 at::AutoDispatchSkipFunctionalize guard;
20612 tmp_output = at::_ops::_transformer_decoder_only_layer_fwd::call(src_, embed_dim, num_heads, qkv_weight_, qkv_bias_, proj_weight_, proj_bias_, use_gelu, norm_first, eps, norm_weight_1_, norm_bias_1_, norm_weight_2_, norm_bias_2_, ffn_weight_1_, ffn_bias_1_, ffn_weight_2_, ffn_bias_2_, mask_, incr_key_, incr_value_);
20613 }
20614 at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
20615 at::functionalization::impl::commit_update(out0);
20616 at::functionalization::impl::sync(out0);
20617 at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
20618 at::functionalization::impl::commit_update(out1);
20619 at::functionalization::impl::sync(out1);
20620 at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
20621 at::functionalization::impl::commit_update(out2);
20622 at::functionalization::impl::sync(out2);
20623 return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
20624 }
20625 }
20626
20627 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _native_decoder_only_multi_head_attention_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, const c10::optional<at::Tensor> & incr_key, const c10::optional<at::Tensor> & incr_value, bool need_weights, bool average_attn_weights, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
20628 if (false) {
20629 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
20630 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
20631 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
20632 auto query_meta = to_meta(query);
20633 auto key_meta = to_meta(key);
20634 auto value_meta = to_meta(value);
20635 auto qkv_weight_meta = to_meta(qkv_weight);
20636 auto qkv_bias_meta = to_meta(qkv_bias);
20637 auto proj_weight_meta = to_meta(proj_weight);
20638 auto proj_bias_meta = to_meta(proj_bias);
20639 auto mask_meta = to_meta(mask);
20640 auto incr_key_meta = to_meta(incr_key);
20641 auto incr_value_meta = to_meta(incr_value);
20642 auto out0_meta = to_meta(out0);
20643 auto out1_meta = to_meta(out1);
20644 auto out2_meta = to_meta(out2);
20645 auto out3_meta = to_meta(out3);
20646 at::AutoDispatchSkipFunctionalize func_guard;
20647 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
20648 at::_ops::_native_decoder_only_multi_head_attention_out::call(query_meta, key_meta, value_meta, embed_dim, num_head, qkv_weight_meta, qkv_bias_meta, proj_weight_meta, proj_bias_meta, mask_meta, incr_key_meta, incr_value_meta, need_weights, average_attn_weights, out0_meta, out1_meta, out2_meta, out3_meta);
20649 }
20650
20651 at::Tensor query_;
20652 if (at::functionalization::impl::isFunctionalTensor(query)) {
20653 at::functionalization::impl::sync(query);
20654 query_ = at::functionalization::impl::from_functional_tensor(query);
20655 } else {
20656 query_ = query;
20657 }
20658
20659 at::Tensor key_;
20660 if (at::functionalization::impl::isFunctionalTensor(key)) {
20661 at::functionalization::impl::sync(key);
20662 key_ = at::functionalization::impl::from_functional_tensor(key);
20663 } else {
20664 key_ = key;
20665 }
20666
20667 at::Tensor value_;
20668 if (at::functionalization::impl::isFunctionalTensor(value)) {
20669 at::functionalization::impl::sync(value);
20670 value_ = at::functionalization::impl::from_functional_tensor(value);
20671 } else {
20672 value_ = value;
20673 }
20674
20675 at::Tensor qkv_weight_;
20676 if (at::functionalization::impl::isFunctionalTensor(qkv_weight)) {
20677 at::functionalization::impl::sync(qkv_weight);
20678 qkv_weight_ = at::functionalization::impl::from_functional_tensor(qkv_weight);
20679 } else {
20680 qkv_weight_ = qkv_weight;
20681 }
20682
20683 at::Tensor qkv_bias_;
20684 if (at::functionalization::impl::isFunctionalTensor(qkv_bias)) {
20685 at::functionalization::impl::sync(qkv_bias);
20686 qkv_bias_ = at::functionalization::impl::from_functional_tensor(qkv_bias);
20687 } else {
20688 qkv_bias_ = qkv_bias;
20689 }
20690
20691 at::Tensor proj_weight_;
20692 if (at::functionalization::impl::isFunctionalTensor(proj_weight)) {
20693 at::functionalization::impl::sync(proj_weight);
20694 proj_weight_ = at::functionalization::impl::from_functional_tensor(proj_weight);
20695 } else {
20696 proj_weight_ = proj_weight;
20697 }
20698
20699 at::Tensor proj_bias_;
20700 if (at::functionalization::impl::isFunctionalTensor(proj_bias)) {
20701 at::functionalization::impl::sync(proj_bias);
20702 proj_bias_ = at::functionalization::impl::from_functional_tensor(proj_bias);
20703 } else {
20704 proj_bias_ = proj_bias;
20705 }
20706
20707 c10::optional<at::Tensor> mask_;
20708 if (at::functionalization::impl::isFunctionalTensor(mask)) {
20709 at::functionalization::impl::sync(mask);
20710 mask_ = at::functionalization::impl::from_functional_tensor(mask);
20711 } else {
20712 mask_ = mask;
20713 }
20714
20715 c10::optional<at::Tensor> incr_key_;
20716 if (at::functionalization::impl::isFunctionalTensor(incr_key)) {
20717 at::functionalization::impl::sync(incr_key);
20718 incr_key_ = at::functionalization::impl::from_functional_tensor(incr_key);
20719 } else {
20720 incr_key_ = incr_key;
20721 }
20722
20723 c10::optional<at::Tensor> incr_value_;
20724 if (at::functionalization::impl::isFunctionalTensor(incr_value)) {
20725 at::functionalization::impl::sync(incr_value);
20726 incr_value_ = at::functionalization::impl::from_functional_tensor(incr_value);
20727 } else {
20728 incr_value_ = incr_value;
20729 }
20730
20731 at::Tensor out0_;
20732 if (at::functionalization::impl::isFunctionalTensor(out0)) {
20733 at::functionalization::impl::sync(out0);
20734 out0_ = at::functionalization::impl::from_functional_tensor(out0);
20735 } else {
20736 out0_ = out0;
20737 }
20738
20739 at::Tensor out1_;
20740 if (at::functionalization::impl::isFunctionalTensor(out1)) {
20741 at::functionalization::impl::sync(out1);
20742 out1_ = at::functionalization::impl::from_functional_tensor(out1);
20743 } else {
20744 out1_ = out1;
20745 }
20746
20747 at::Tensor out2_;
20748 if (at::functionalization::impl::isFunctionalTensor(out2)) {
20749 at::functionalization::impl::sync(out2);
20750 out2_ = at::functionalization::impl::from_functional_tensor(out2);
20751 } else {
20752 out2_ = out2;
20753 }
20754
20755 at::Tensor out3_;
20756 if (at::functionalization::impl::isFunctionalTensor(out3)) {
20757 at::functionalization::impl::sync(out3);
20758 out3_ = at::functionalization::impl::from_functional_tensor(out3);
20759 } else {
20760 out3_ = out3;
20761 }
20762 if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2) && at::functionalization::impl::isFunctionalTensor(out3))) {
20763 if ((false || at::functionalization::impl::isFunctionalTensor(query) || at::functionalization::impl::isFunctionalTensor(key) || at::functionalization::impl::isFunctionalTensor(value) || at::functionalization::impl::isFunctionalTensor(qkv_weight) || at::functionalization::impl::isFunctionalTensor(qkv_bias) || at::functionalization::impl::isFunctionalTensor(proj_weight) || at::functionalization::impl::isFunctionalTensor(proj_bias) || at::functionalization::impl::isFunctionalTensor(mask) || at::functionalization::impl::isFunctionalTensor(incr_key) || at::functionalization::impl::isFunctionalTensor(incr_value))) {
20764 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
20765 TORCH_INTERNAL_ASSERT(false,
20766 "mutating a non-functional tensor with a functional tensor is not allowed.",
20767 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
20768 } else {
20769 // case 2: arguments are not functional tensors, so we no-op and redispatch.
20770 at::AutoDispatchSkipFunctionalize guard;
20771 ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_native_decoder_only_multi_head_attention_out::call(query_, key_, value_, embed_dim, num_head, qkv_weight_, qkv_bias_, proj_weight_, proj_bias_, mask_, incr_key_, incr_value_, need_weights, average_attn_weights, out0_, out1_, out2_, out3_);
20772 return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3);
20773 }
20774 } else {
20775 ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output;
20776 {
20777 at::AutoDispatchSkipFunctionalize guard;
20778 tmp_output = at::_ops::_native_decoder_only_multi_head_attention::call(query_, key_, value_, embed_dim, num_head, qkv_weight_, qkv_bias_, proj_weight_, proj_bias_, mask_, incr_key_, incr_value_, need_weights, average_attn_weights);
20779 }
20780 at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
20781 at::functionalization::impl::commit_update(out0);
20782 at::functionalization::impl::sync(out0);
20783 at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
20784 at::functionalization::impl::commit_update(out1);
20785 at::functionalization::impl::sync(out1);
20786 at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
20787 at::functionalization::impl::commit_update(out2);
20788 at::functionalization::impl::sync(out2);
20789 at::functionalization::impl::replace_(out3, std::get<3>(tmp_output));
20790 at::functionalization::impl::commit_update(out3);
20791 at::functionalization::impl::sync(out3);
20792 return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3);
20793 }
20794 }
20795
20796 at::Tensor & special_modified_bessel_i1_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
20797 if (false) {
20798 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
20799 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
20800 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
20801 auto self_meta = to_meta(self);
20802 auto out_meta = to_meta(out);
20803 at::AutoDispatchSkipFunctionalize func_guard;
20804 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
20805 at::_ops::special_modified_bessel_i1_out::call(self_meta, out_meta);
20806 }
20807
20808 at::Tensor self_;
20809 if (at::functionalization::impl::isFunctionalTensor(self)) {
20810 at::functionalization::impl::sync(self);
20811 self_ = at::functionalization::impl::from_functional_tensor(self);
20812 } else {
20813 self_ = self;
20814 }
20815
20816 at::Tensor out_;
20817 if (at::functionalization::impl::isFunctionalTensor(out)) {
20818 at::functionalization::impl::sync(out);
20819 out_ = at::functionalization::impl::from_functional_tensor(out);
20820 } else {
20821 out_ = out;
20822 }
20823 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
20824 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
20825 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
20826 TORCH_INTERNAL_ASSERT(false,
20827 "mutating a non-functional tensor with a functional tensor is not allowed.",
20828 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
20829 } else {
20830 // case 2: arguments are not functional tensors, so we no-op and redispatch.
20831 at::AutoDispatchSkipFunctionalize guard;
20832 at::Tensor tmp_output = at::_ops::special_modified_bessel_i1_out::call(self_, out_);
20833 return out;
20834 }
20835 } else {
20836 at::Tensor tmp_output;
20837 {
20838 at::AutoDispatchSkipFunctionalize guard;
20839 tmp_output = at::_ops::special_modified_bessel_i1::call(self_);
20840 }
20841 at::functionalization::impl::replace_(out, tmp_output);
20842 at::functionalization::impl::commit_update(out);
20843 at::functionalization::impl::sync(out);
20844 return out;
20845 }
20846 }
20847
20848 at::Tensor & special_modified_bessel_k0_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
20849 if (false) {
20850 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
20851 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
20852 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
20853 auto self_meta = to_meta(self);
20854 auto out_meta = to_meta(out);
20855 at::AutoDispatchSkipFunctionalize func_guard;
20856 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
20857 at::_ops::special_modified_bessel_k0_out::call(self_meta, out_meta);
20858 }
20859
20860 at::Tensor self_;
20861 if (at::functionalization::impl::isFunctionalTensor(self)) {
20862 at::functionalization::impl::sync(self);
20863 self_ = at::functionalization::impl::from_functional_tensor(self);
20864 } else {
20865 self_ = self;
20866 }
20867
20868 at::Tensor out_;
20869 if (at::functionalization::impl::isFunctionalTensor(out)) {
20870 at::functionalization::impl::sync(out);
20871 out_ = at::functionalization::impl::from_functional_tensor(out);
20872 } else {
20873 out_ = out;
20874 }
20875 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
20876 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
20877 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
20878 TORCH_INTERNAL_ASSERT(false,
20879 "mutating a non-functional tensor with a functional tensor is not allowed.",
20880 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
20881 } else {
20882 // case 2: arguments are not functional tensors, so we no-op and redispatch.
20883 at::AutoDispatchSkipFunctionalize guard;
20884 at::Tensor tmp_output = at::_ops::special_modified_bessel_k0_out::call(self_, out_);
20885 return out;
20886 }
20887 } else {
20888 at::Tensor tmp_output;
20889 {
20890 at::AutoDispatchSkipFunctionalize guard;
20891 tmp_output = at::_ops::special_modified_bessel_k0::call(self_);
20892 }
20893 at::functionalization::impl::replace_(out, tmp_output);
20894 at::functionalization::impl::commit_update(out);
20895 at::functionalization::impl::sync(out);
20896 return out;
20897 }
20898 }
20899
20900 at::Tensor & _foobar_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool arg1, bool arg2, bool arg3, at::Tensor & out) {
20901 if (false) {
20902 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
20903 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
20904 // (We can only do this for inplace ops today though, because they technically all support meta tensors).
20905 auto self_meta = to_meta(self);
20906 auto out_meta = to_meta(out);
20907 at::AutoDispatchSkipFunctionalize func_guard;
20908 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
20909 at::_ops::_foobar_out::call(self_meta, arg1, arg2, arg3, out_meta);
20910 }
20911
20912 at::Tensor self_;
20913 if (at::functionalization::impl::isFunctionalTensor(self)) {
20914 at::functionalization::impl::sync(self);
20915 self_ = at::functionalization::impl::from_functional_tensor(self);
20916 } else {
20917 self_ = self;
20918 }
20919
20920 at::Tensor out_;
20921 if (at::functionalization::impl::isFunctionalTensor(out)) {
20922 at::functionalization::impl::sync(out);
20923 out_ = at::functionalization::impl::from_functional_tensor(out);
20924 } else {
20925 out_ = out;
20926 }
20927 if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
20928 if ((false || at::functionalization::impl::isFunctionalTensor(self))) {
20929 // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
20930 TORCH_INTERNAL_ASSERT(false,
20931 "mutating a non-functional tensor with a functional tensor is not allowed.",
20932 " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
20933 } else {
20934 // case 2: arguments are not functional tensors, so we no-op and redispatch.
20935 at::AutoDispatchSkipFunctionalize guard;
20936 at::Tensor tmp_output = at::_ops::_foobar_out::call(self_, arg1, arg2, arg3, out_);
20937 return out;
20938 }
20939 } else {
20940 at::Tensor tmp_output;
20941 {
20942 at::AutoDispatchSkipFunctionalize guard;
20943 tmp_output = at::_ops::_foobar::call(self_, arg1, arg2, arg3);
20944 }
20945 at::functionalization::impl::replace_(out, tmp_output);
20946 at::functionalization::impl::commit_update(out);
20947 at::functionalization::impl::sync(out);
20948 return out;
20949 }
20950 }
20951
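// The view-op kernels below (view_as_real, slice, t, indices, crow_indices,
// col_indices, unfold, ...) share one pattern: compute the view (or its *_copy
// variant when views are not being reapplied), record a ViewMeta whose two
// lambdas can (a) replay the view off a base tensor and (b) scatter a mutated
// view back into the base, and wrap the result in a FunctionalTensorWrapper.
// A condensed sketch is kept under `#if 0` for reference; `some_view` /
// `some_view_copy` are placeholders, and the XLA/Lazy reference-meta stride
// propagation done by the real kernels is omitted.
#if 0
at::Tensor some_view_sketch(const at::Tensor & self) {
  // Assumes `self` is already known to be a functional tensor.
  at::Tensor self_ = at::functionalization::impl::from_functional_tensor(self);
  auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
  at::Tensor tmp_output;
  {
    at::AutoDispatchSkipFunctionalize guard;
    tmp_output = reapply_views ? at::_ops::some_view::call(self_)         // placeholder
                               : at::_ops::some_view_copy::call(self_);   // placeholder
  }
  at::functionalization::ViewMeta view_meta(
    // forward: replay the view off an arbitrary base
    [reapply_views](const at::Tensor & base, int64_t) -> at::Tensor {
      return reapply_views ? at::_ops::some_view::call(base)
                           : at::_ops::some_view_copy::call(base);
    },
    // inverse: scatter a mutated view back into the base
    [reapply_views](const at::Tensor & base, const at::Tensor & mutated_view, int64_t) -> at::Tensor {
      return at::functionalization::FunctionalInverses::some_view_copy_inverse(base, mutated_view, reapply_views);
    }
  );
  return at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
}
#endif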
20952 at::Tensor view_as_real(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
20953
20954 at::Tensor self_;
20955 if (at::functionalization::impl::isFunctionalTensor(self)) {
20956
20957 self_ = at::functionalization::impl::from_functional_tensor(self);
20958 } else {
20959 self_ = self;
20960 }
20961 if (!at::functionalization::impl::isFunctionalTensor(self)) {
20962 // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
20963 at::AutoDispatchSkipFunctionalize guard;
20964 return at::_ops::view_as_real::call(self_);
20965 }
20966 auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
20967 auto compute_reference_meta =
20968 self.key_set().has_backend(c10::BackendComponent::XLABit) ||
20969 self.key_set().has_backend(c10::BackendComponent::LazyBit);
20970 at::Tensor reference_tensor_output;
20971 if (compute_reference_meta) {
20972 auto self_meta = to_meta(self);
20973 at::AutoDispatchSkipFunctionalize func_guard;
20974 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
20975 reference_tensor_output = at::_ops::view_as_real::call(self_meta);
20976 }
20977 at::Tensor tmp_output;
20978 {
20979 at::AutoDispatchSkipFunctionalize guard;
20980 if (reapply_views) {
20981 tmp_output = at::_ops::view_as_real::call(self_);
20982 } else {
20983 tmp_output = at::_ops::view_as_real_copy::call(self_);
20984 }
20985 }
20986 at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
20987 [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
20988 if (reapply_views) {
20989 return at::_ops::view_as_real::call(base);
20990 } else {
20991 return at::_ops::view_as_real_copy::call(base);
20992 }
20993 },
20994 [reapply_views = reapply_views](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
20995 return at::functionalization::FunctionalInverses::view_as_real_copy_inverse(base, mutated_view, reapply_views);
20996 }
20997 );
20998 auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
20999 // See Note [Propagating strides in the functionalization pass]
21000 if (compute_reference_meta) {
21001 at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
21002 }
21003 return out;
21004 }
21005
21006 at::Tensor slice_Tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {
21007
21008 at::Tensor self_;
21009 if (at::functionalization::impl::isFunctionalTensor(self)) {
21010
21011 self_ = at::functionalization::impl::from_functional_tensor(self);
21012 } else {
21013 self_ = self;
21014 }
21015 if (!at::functionalization::impl::isFunctionalTensor(self)) {
21016 // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
21017 at::AutoDispatchSkipFunctionalize guard;
21018 return at::_ops::slice_Tensor::call(self_, dim, start, end, step);
21019 }
21020 auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
21021 auto compute_reference_meta =
21022 self.key_set().has_backend(c10::BackendComponent::XLABit) ||
21023 self.key_set().has_backend(c10::BackendComponent::LazyBit);
21024 at::Tensor reference_tensor_output;
21025 if (compute_reference_meta) {
21026 auto self_meta = to_meta(self);
21027 at::AutoDispatchSkipFunctionalize func_guard;
21028 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
21029 reference_tensor_output = at::_ops::slice_Tensor::call(self_meta, dim, start, end, step);
21030 }
21031 at::Tensor tmp_output;
21032 {
21033 at::AutoDispatchSkipFunctionalize guard;
21034 if (reapply_views) {
21035 tmp_output = at::_ops::slice_Tensor::call(self_, dim, start, end, step);
21036 } else {
21037 tmp_output = at::_ops::slice_copy_Tensor::call(self_, dim, start, end, step);
21038 }
21039 }
21040 at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
21041 [reapply_views = reapply_views, dim = dim, start = start, end = end, step = step](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
21042 if (reapply_views) {
21043 return at::_ops::slice_Tensor::call(base, dim, start, end, step);
21044 } else {
21045 return at::_ops::slice_copy_Tensor::call(base, dim, start, end, step);
21046 }
21047 },
21048 [reapply_views = reapply_views, dim = dim, start = start, end = end, step = step](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
21049 return at::functionalization::FunctionalInverses::slice_copy_Tensor_inverse(base, mutated_view, reapply_views, dim, start, end, step);
21050 }
21051 );
21052 auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
21053 // See Note [Propagating strides in the functionalization pass]
21054 if (compute_reference_meta) {
21055 at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
21056 }
21057 return out;
21058 }
21059
21060 at::Tensor t(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
21061
21062 at::Tensor self_;
21063 if (at::functionalization::impl::isFunctionalTensor(self)) {
21064
21065 self_ = at::functionalization::impl::from_functional_tensor(self);
21066 } else {
21067 self_ = self;
21068 }
21069 if (!at::functionalization::impl::isFunctionalTensor(self)) {
21070 // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
21071 at::AutoDispatchSkipFunctionalize guard;
21072 return at::_ops::t::call(self_);
21073 }
21074 auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
21075 auto compute_reference_meta =
21076 self.key_set().has_backend(c10::BackendComponent::XLABit) ||
21077 self.key_set().has_backend(c10::BackendComponent::LazyBit);
21078 at::Tensor reference_tensor_output;
21079 if (compute_reference_meta) {
21080 auto self_meta = to_meta(self);
21081 at::AutoDispatchSkipFunctionalize func_guard;
21082 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
21083 reference_tensor_output = at::_ops::t::call(self_meta);
21084 }
21085 at::Tensor tmp_output;
21086 {
21087 at::AutoDispatchSkipFunctionalize guard;
21088 if (reapply_views) {
21089 tmp_output = at::_ops::t::call(self_);
21090 } else {
21091 tmp_output = at::_ops::t_copy::call(self_);
21092 }
21093 }
21094 at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
21095 [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
21096 if (reapply_views) {
21097 return at::_ops::t::call(base);
21098 } else {
21099 return at::_ops::t_copy::call(base);
21100 }
21101 },
21102 [reapply_views = reapply_views](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
21103 return at::functionalization::FunctionalInverses::t_copy_inverse(base, mutated_view, reapply_views);
21104 }
21105 );
21106 auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
21107 // See Note [Propagating strides in the functionalization pass]
21108 if (compute_reference_meta) {
21109 at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
21110 }
21111 return out;
21112 }
21113
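// In-place view ops such as t_() below do not create a new wrapper. Instead
// they call mutate_view_meta() to push the ViewMeta onto the existing
// FunctionalTensorWrapper and replay it, which updates the wrapper's
// size/stride metadata in place (see the comments inside t_ below).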
21114 at::Tensor & t_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
21115 if (!at::functionalization::impl::isFunctionalTensor(self)) {
21116 // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
21117
21118 at::Tensor self_;
21119 if (at::functionalization::impl::isFunctionalTensor(self)) {
21120
21121 self_ = at::functionalization::impl::from_functional_tensor(self);
21122 } else {
21123 self_ = self;
21124 }
21125 at::AutoDispatchSkipFunctionalize guard;
21126 return at::_ops::t_::call(self_);
21127 }
21128 auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
21129 at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
21130 [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
21131 if (reapply_views) {
21132 return at::_ops::t::call(base);
21133 } else {
21134 return at::_ops::t_copy::call(base);
21135 }
21136 },
21137 [reapply_views = reapply_views](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
21138 return at::functionalization::FunctionalInverses::t_copy_inverse(base, mutated_view, reapply_views);
21139 }
21140 );
21141 auto compute_reference_meta =
21142 self.key_set().has_backend(c10::BackendComponent::XLABit) ||
21143 self.key_set().has_backend(c10::BackendComponent::LazyBit);
21144 at::Tensor reference_tensor_output;
21145 if (compute_reference_meta) {
21146 auto self_meta = to_meta(self);
21147 at::AutoDispatchSkipFunctionalize func_guard;
21148 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
21149 reference_tensor_output = at::_ops::t_::call(self_meta);
21150 }
21151 // This function adds the above view meta to the current tensor and replays it off the base,
21152 // mutating the size/stride info of the current FunctionalTensorWrapper.
21153 // Because of this, we need to make sure to run the reference shape function above,
21154 // BEFORE doing this (otherwise we'll end up running the reference function using the wrong sizes/strides)
21155 at::functionalization::impl::mutate_view_meta(self, view_meta);
21156 // See Note [Propagating strides in the functionalization pass]
21157 // XLA/LTC don't implement the logic to propagate strides correctly, so we need to rely
21158 // on a reference implementation here (instead of relying on the output from the forward lambda
21159 // having the correct stride info)
21160 if (compute_reference_meta) {
21161 at::functionalization::impl::set_sizes_strides_offset(self, reference_tensor_output);
21162 }
21163 return self;
21164 }
21165
21166 at::Tensor _indices(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
21167
21168 at::Tensor self_;
21169 if (at::functionalization::impl::isFunctionalTensor(self)) {
21170
21171 self_ = at::functionalization::impl::from_functional_tensor(self);
21172 } else {
21173 self_ = self;
21174 }
21175 if (!at::functionalization::impl::isFunctionalTensor(self)) {
21176 // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
21177 at::AutoDispatchSkipFunctionalize guard;
21178 return at::_ops::_indices::call(self_);
21179 }
21180 auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
21181 auto compute_reference_meta =
21182 self.key_set().has_backend(c10::BackendComponent::XLABit) ||
21183 self.key_set().has_backend(c10::BackendComponent::LazyBit);
21184 at::Tensor reference_tensor_output;
21185 if (compute_reference_meta) {
21186 auto self_meta = to_meta(self);
21187 at::AutoDispatchSkipFunctionalize func_guard;
21188 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
21189 reference_tensor_output = at::_ops::_indices::call(self_meta);
21190 }
21191 at::Tensor tmp_output;
21192 {
21193 at::AutoDispatchSkipFunctionalize guard;
21194 if (reapply_views) {
21195 tmp_output = at::_ops::_indices::call(self_);
21196 } else {
21197 tmp_output = at::_ops::_indices_copy::call(self_);
21198 }
21199 }
21200 at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
21201 [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
21202 if (reapply_views) {
21203 return at::_ops::_indices::call(base);
21204 } else {
21205 return at::_ops::_indices_copy::call(base);
21206 }
21207 },
21208 [reapply_views = reapply_views](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
21209 return at::functionalization::FunctionalInverses::_indices_copy_inverse(base, mutated_view, reapply_views);
21210 }
21211 );
21212 auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
21213 // See Note [Propagating strides in the functionalization pass]
21214 if (compute_reference_meta) {
21215 at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
21216 }
21217 return out;
21218 }
21219
21220 at::Tensor indices(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
21221
21222 at::Tensor self_;
21223 if (at::functionalization::impl::isFunctionalTensor(self)) {
21224
21225 self_ = at::functionalization::impl::from_functional_tensor(self);
21226 } else {
21227 self_ = self;
21228 }
21229 if (!at::functionalization::impl::isFunctionalTensor(self)) {
21230 // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
21231 at::AutoDispatchSkipFunctionalize guard;
21232 return at::_ops::indices::call(self_);
21233 }
21234 auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
21235 auto compute_reference_meta =
21236 self.key_set().has_backend(c10::BackendComponent::XLABit) ||
21237 self.key_set().has_backend(c10::BackendComponent::LazyBit);
21238 at::Tensor reference_tensor_output;
21239 if (compute_reference_meta) {
21240 auto self_meta = to_meta(self);
21241 at::AutoDispatchSkipFunctionalize func_guard;
21242 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
21243 reference_tensor_output = at::_ops::indices::call(self_meta);
21244 }
21245 at::Tensor tmp_output;
21246 {
21247 at::AutoDispatchSkipFunctionalize guard;
21248 if (reapply_views) {
21249 tmp_output = at::_ops::indices::call(self_);
21250 } else {
21251 tmp_output = at::_ops::indices_copy::call(self_);
21252 }
21253 }
21254 at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
21255 [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
21256 if (reapply_views) {
21257 return at::_ops::indices::call(base);
21258 } else {
21259 return at::_ops::indices_copy::call(base);
21260 }
21261 },
21262 [reapply_views = reapply_views](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
21263 return at::functionalization::FunctionalInverses::indices_copy_inverse(base, mutated_view, reapply_views);
21264 }
21265 );
21266 auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
21267 // See Note [Propagating strides in the functionalization pass]
21268 if (compute_reference_meta) {
21269 at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
21270 }
21271 return out;
21272 }
21273
21274 at::Tensor crow_indices(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
21275
21276 at::Tensor self_;
21277 if (at::functionalization::impl::isFunctionalTensor(self)) {
21278
21279 self_ = at::functionalization::impl::from_functional_tensor(self);
21280 } else {
21281 self_ = self;
21282 }
21283 if (!at::functionalization::impl::isFunctionalTensor(self)) {
21284 // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
21285 at::AutoDispatchSkipFunctionalize guard;
21286 return at::_ops::crow_indices::call(self_);
21287 }
21288 auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
21289 auto compute_reference_meta =
21290 self.key_set().has_backend(c10::BackendComponent::XLABit) ||
21291 self.key_set().has_backend(c10::BackendComponent::LazyBit);
21292 at::Tensor reference_tensor_output;
21293 if (compute_reference_meta) {
21294 auto self_meta = to_meta(self);
21295 at::AutoDispatchSkipFunctionalize func_guard;
21296 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
21297 reference_tensor_output = at::_ops::crow_indices::call(self_meta);
21298 }
21299 at::Tensor tmp_output;
21300 {
21301 at::AutoDispatchSkipFunctionalize guard;
21302 if (reapply_views) {
21303 tmp_output = at::_ops::crow_indices::call(self_);
21304 } else {
21305 tmp_output = at::_ops::crow_indices_copy::call(self_);
21306 }
21307 }
21308 at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
21309 [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
21310 if (reapply_views) {
21311 return at::_ops::crow_indices::call(base);
21312 } else {
21313 return at::_ops::crow_indices_copy::call(base);
21314 }
21315 },
21316 [reapply_views = reapply_views](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
21317 return at::functionalization::FunctionalInverses::crow_indices_copy_inverse(base, mutated_view, reapply_views);
21318 }
21319 );
21320 auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
21321 // See Note [Propagating strides in the functionalization pass]
21322 if (compute_reference_meta) {
21323 at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
21324 }
21325 return out;
21326 }
21327
21328 at::Tensor col_indices(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
21329
21330 at::Tensor self_;
21331 if (at::functionalization::impl::isFunctionalTensor(self)) {
21332
21333 self_ = at::functionalization::impl::from_functional_tensor(self);
21334 } else {
21335 self_ = self;
21336 }
21337 if (!at::functionalization::impl::isFunctionalTensor(self)) {
21338 // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
21339 at::AutoDispatchSkipFunctionalize guard;
21340 return at::_ops::col_indices::call(self_);
21341 }
21342 auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
21343 auto compute_reference_meta =
21344 self.key_set().has_backend(c10::BackendComponent::XLABit) ||
21345 self.key_set().has_backend(c10::BackendComponent::LazyBit);
21346 at::Tensor reference_tensor_output;
21347 if (compute_reference_meta) {
21348 auto self_meta = to_meta(self);
21349 at::AutoDispatchSkipFunctionalize func_guard;
21350 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
21351 reference_tensor_output = at::_ops::col_indices::call(self_meta);
21352 }
21353 at::Tensor tmp_output;
21354 {
21355 at::AutoDispatchSkipFunctionalize guard;
21356 if (reapply_views) {
21357 tmp_output = at::_ops::col_indices::call(self_);
21358 } else {
21359 tmp_output = at::_ops::col_indices_copy::call(self_);
21360 }
21361 }
21362 at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
21363 [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
21364 if (reapply_views) {
21365 return at::_ops::col_indices::call(base);
21366 } else {
21367 return at::_ops::col_indices_copy::call(base);
21368 }
21369 },
21370 [reapply_views = reapply_views](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
21371 return at::functionalization::FunctionalInverses::col_indices_copy_inverse(base, mutated_view, reapply_views);
21372 }
21373 );
21374 auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
21375 // See Note [Propagating strides in the functionalization pass]
21376 if (compute_reference_meta) {
21377 at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
21378 }
21379 return out;
21380 }
21381
21382 at::Tensor unfold(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
21383
21384 at::Tensor self_;
21385 if (at::functionalization::impl::isFunctionalTensor(self)) {
21386
21387 self_ = at::functionalization::impl::from_functional_tensor(self);
21388 } else {
21389 self_ = self;
21390 }
21391 if (!at::functionalization::impl::isFunctionalTensor(self)) {
21392 // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
21393 at::AutoDispatchSkipFunctionalize guard;
21394 return at::_ops::unfold::call(self_, dimension, size, step);
21395 }
21396 auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
21397 auto compute_reference_meta =
21398 self.key_set().has_backend(c10::BackendComponent::XLABit) ||
21399 self.key_set().has_backend(c10::BackendComponent::LazyBit);
21400 at::Tensor reference_tensor_output;
21401 if (compute_reference_meta) {
21402 auto self_meta = to_meta(self);
21403 at::AutoDispatchSkipFunctionalize func_guard;
21404 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
21405 reference_tensor_output = at::_ops::unfold::call(self_meta, dimension, size, step);
21406 }
21407 at::Tensor tmp_output;
21408 {
21409 at::AutoDispatchSkipFunctionalize guard;
21410 if (reapply_views) {
21411 tmp_output = at::_ops::unfold::call(self_, dimension, size, step);
21412 } else {
21413 tmp_output = at::_ops::unfold_copy::call(self_, dimension, size, step);
21414 }
21415 }
21416 at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
21417 [reapply_views = reapply_views, dimension = dimension, size = size, step = step](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
21418 if (reapply_views) {
21419 return at::_ops::unfold::call(base, dimension, size, step);
21420 } else {
21421 return at::_ops::unfold_copy::call(base, dimension, size, step);
21422 }
21423 },
21424 [reapply_views = reapply_views, dimension = dimension, size = size, step = step](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
21425 return at::functionalization::FunctionalInverses::unfold_copy_inverse(base, mutated_view, reapply_views, dimension, size, step);
21426 }
21427 );
21428 auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
21429 // See Note [Propagating strides in the functionalization pass]
21430 if (compute_reference_meta) {
21431 at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
21432 }
21433 return out;
21434 }
21435
21436} // namespace functionalization
21437
21438namespace {
21439
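// The registrations below bind the kernels in this file to the Functionalize
// dispatch key. Ops with generated functionalization kernels are registered
// via TORCH_FN(functionalization::...); composite ops (e.g. absolute, arccos,
// ldexp) are instead registered directly to their at::native implementations,
// so that they decompose into ops that already have functionalization kernels.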
21440TORCH_LIBRARY_IMPL(aten, Functionalize, m) {
21441 m.impl("_cudnn_rnn_flatten_weight.out", TORCH_FN(functionalization::_cudnn_rnn_flatten_weight_out_out));
21442 m.impl("abs.out", TORCH_FN(functionalization::abs_out_out));
21443 m.impl("abs_", TORCH_FN(functionalization::abs_));
21444 m.impl("absolute", static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::absolute));
21445 m.impl("absolute.out", static_cast<at::Tensor & (*)(const at::Tensor & self, at::Tensor & out)>(at::native::absolute_out));
21446 m.impl("absolute_", static_cast<at::Tensor & (*)(at::Tensor & self)>(at::native::absolute_));
21447 m.impl("_conj_physical.out", TORCH_FN(functionalization::_conj_physical_out_out));
21448 m.impl("acos.out", TORCH_FN(functionalization::acos_out_out));
21449 m.impl("acos_", TORCH_FN(functionalization::acos_));
21450 m.impl("arccos", static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::arccos));
21451 m.impl("arccos.out", static_cast<at::Tensor & (*)(const at::Tensor & self, at::Tensor & out)>(at::native::arccos_out));
21452 m.impl("arccos_", static_cast<at::Tensor & (*)(at::Tensor & self)>(at::native::arccos_));
21453 m.impl("affine_grid_generator.out", TORCH_FN(functionalization::affine_grid_generator_out_out));
21454 m.impl("arange.out", TORCH_FN(functionalization::arange_out_out));
21455 m.impl("arange.start_out", TORCH_FN(functionalization::arange_out_start_out));
21456 m.impl("arccosh", static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::arccosh));
21457 m.impl("arccosh.out", static_cast<at::Tensor & (*)(const at::Tensor & self, at::Tensor & out)>(at::native::arccosh_out));
21458 m.impl("arccosh_", static_cast<at::Tensor & (*)(at::Tensor & self)>(at::native::arccosh_));
21459 m.impl("arcsinh", static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::arcsinh));
21460 m.impl("arcsinh.out", static_cast<at::Tensor & (*)(const at::Tensor & self, at::Tensor & out)>(at::native::arcsinh_out));
21461 m.impl("arcsinh_", static_cast<at::Tensor & (*)(at::Tensor & self)>(at::native::arcsinh_));
21462 m.impl("atanh.out", TORCH_FN(functionalization::atanh_out_out));
21463 m.impl("atanh_", TORCH_FN(functionalization::atanh_));
21464 m.impl("asin.out", TORCH_FN(functionalization::asin_out_out));
21465 m.impl("asin_", TORCH_FN(functionalization::asin_));
21466 m.impl("binary_cross_entropy_backward.grad_input", TORCH_FN(functionalization::binary_cross_entropy_backward_out_grad_input));
21467 m.impl("binary_cross_entropy_with_logits.out", TORCH_FN(functionalization::binary_cross_entropy_with_logits_out_out));
21468 m.impl("logical_not.out", TORCH_FN(functionalization::logical_not_out_out));
21469 m.impl("logical_not_", TORCH_FN(functionalization::logical_not_));
21470 m.impl("logical_and.out", TORCH_FN(functionalization::logical_and_out_out));
21471 m.impl("logical_and_", TORCH_FN(functionalization::logical_and_));
21472 m.impl("concatenate", static_cast<at::Tensor (*)(at::TensorList tensors, int64_t dim)>(at::native::concatenate));
21473 m.impl("concatenate.out", static_cast<at::Tensor & (*)(at::TensorList tensors, int64_t dim, at::Tensor & out)>(at::native::concatenate_out));
21474 m.impl("concatenate.names", static_cast<at::Tensor (*)(at::TensorList tensors, at::Dimname dim)>(at::native::concatenate));
21475 m.impl("concatenate.names_out", static_cast<at::Tensor & (*)(at::TensorList tensors, at::Dimname dim, at::Tensor & out)>(at::native::concatenate_out));
21476 m.impl("block_diag.out", TORCH_FN(functionalization::block_diag_out_out));
21477 m.impl("chain_matmul", static_cast<at::Tensor (*)(at::TensorList matrices)>(at::native::chain_matmul));
21478 m.impl("chain_matmul.out", static_cast<at::Tensor & (*)(at::TensorList matrices, at::Tensor & out)>(at::native::chain_matmul_out));
21479 m.impl("convolution_backward.out", TORCH_FN(functionalization::convolution_backward_out_out));
21480 m.impl("_copy_from.out", TORCH_FN(functionalization::_copy_from_out_out));
21481 m.impl("cosh.out", TORCH_FN(functionalization::cosh_out_out));
21482 m.impl("cosh_", TORCH_FN(functionalization::cosh_));
21483 m.impl("cudnn_convolution_transpose.out", TORCH_FN(functionalization::cudnn_convolution_transpose_out_out));
21484 m.impl("_mps_convolution_transpose.out", TORCH_FN(functionalization::_mps_convolution_transpose_out_out));
21485 m.impl("cudnn_grid_sampler.out", TORCH_FN(functionalization::cudnn_grid_sampler_out_out));
21486 m.impl("cumprod.out", TORCH_FN(functionalization::cumprod_out_out));
21487 m.impl("cumprod_", TORCH_FN(functionalization::cumprod_));
21488 m.impl("cumprod.dimname", static_cast<at::Tensor (*)(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype)>(at::native::cumprod));
21489 m.impl("cumprod.dimname_out", static_cast<at::Tensor & (*)(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype, at::Tensor & out)>(at::native::cumprod_out));
21490 m.impl("cumprod_.dimname", static_cast<at::Tensor & (*)(at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype)>(at::native::cumprod_));
21491 m.impl("diag_embed.out", TORCH_FN(functionalization::diag_embed_out_out));
21492 m.impl("diagonal_backward.out", TORCH_FN(functionalization::diagonal_backward_out_out));
21493 m.impl("div.out", TORCH_FN(functionalization::div_out_out));
21494 m.impl("div_.Tensor", TORCH_FN(functionalization::div__Tensor));
21495 m.impl("div.out_mode", TORCH_FN(functionalization::div_out_out_mode));
21496 m.impl("div_.Tensor_mode", TORCH_FN(functionalization::div__Tensor_mode));
21497 m.impl("div.Scalar_out", TORCH_FN(functionalization::div_out_Scalar_out));
21498 m.impl("div_.Scalar", TORCH_FN(functionalization::div__Scalar));
21499 m.impl("div.Scalar_mode_out", TORCH_FN(functionalization::div_out_Scalar_mode_out));
21500 m.impl("div_.Scalar_mode", TORCH_FN(functionalization::div__Scalar_mode));
21501 m.impl("_embedding_bag.out", TORCH_FN(functionalization::_embedding_bag_out_out));
21502 m.impl("_embedding_bag_per_sample_weights_backward.out", TORCH_FN(functionalization::_embedding_bag_per_sample_weights_backward_out_out));
21503 m.impl("new_full.out", TORCH_FN(functionalization::new_full_out_out));
21504 m.impl("empty_quantized.out", TORCH_FN(functionalization::empty_quantized_out_out));
21505 m.impl("empty_strided.out", TORCH_FN(functionalization::empty_strided_out_out));
21506 m.impl("exp.out", TORCH_FN(functionalization::exp_out_out));
21507 m.impl("exp_", TORCH_FN(functionalization::exp_));
21508 m.impl("expm1.out", TORCH_FN(functionalization::expm1_out_out));
21509 m.impl("expm1_", TORCH_FN(functionalization::expm1_));
21510 m.impl("fill.Scalar_out", TORCH_FN(functionalization::fill_out_Scalar_out));
21511 m.impl("fill_.Scalar", TORCH_FN(functionalization::fill__Scalar));
21512 m.impl("fill.Tensor_out", TORCH_FN(functionalization::fill_out_Tensor_out));
21513 m.impl("fill_.Tensor", TORCH_FN(functionalization::fill__Tensor));
21514 m.impl("floor.out", TORCH_FN(functionalization::floor_out_out));
21515 m.impl("floor_", TORCH_FN(functionalization::floor_));
21516 m.impl("floor_divide.out", TORCH_FN(functionalization::floor_divide_out_out));
21517 m.impl("floor_divide_.Tensor", TORCH_FN(functionalization::floor_divide__Tensor));
21518 m.impl("full.names_out", TORCH_FN(functionalization::full_out_names_out));
21519 m.impl("full.out", TORCH_FN(functionalization::full_out_out));
21520 m.impl("from_file.out", TORCH_FN(functionalization::from_file_out_out));
21521 m.impl("grid_sampler_2d.out", TORCH_FN(functionalization::grid_sampler_2d_out_out));
21522 m.impl("grid_sampler_3d.out", TORCH_FN(functionalization::grid_sampler_3d_out_out));
21523 m.impl("hamming_window.out", TORCH_FN(functionalization::hamming_window_out_out));
21524 m.impl("hamming_window.periodic_out", TORCH_FN(functionalization::hamming_window_out_periodic_out));
21525 m.impl("hamming_window.periodic_alpha_out", TORCH_FN(functionalization::hamming_window_out_periodic_alpha_out));
21526 m.impl("hamming_window.periodic_alpha_beta_out", TORCH_FN(functionalization::hamming_window_out_periodic_alpha_beta_out));
21527 m.impl("native_group_norm.out", TORCH_FN(functionalization::native_group_norm_out_out));
21528 m.impl("_fft_c2r.out", TORCH_FN(functionalization::_fft_c2r_out_out));
21529 m.impl("isnan.out", TORCH_FN(functionalization::isnan_out_out));
21530 m.impl("ldexp.Tensor", static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other)>(at::native::ldexp));
21531 m.impl("ldexp.out", static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & other, at::Tensor & out)>(at::native::ldexp_out));
21532 m.impl("ldexp_", static_cast<at::Tensor & (*)(at::Tensor & self, const at::Tensor & other)>(at::native::ldexp_));
21533 m.impl("log2.out", TORCH_FN(functionalization::log2_out_out));
21534 m.impl("log2_", TORCH_FN(functionalization::log2_));
21535 m.impl("logaddexp2.out", TORCH_FN(functionalization::logaddexp2_out_out));
21536 m.impl("xlogy.OutTensor", TORCH_FN(functionalization::xlogy_out_OutTensor));
21537 m.impl("xlogy_.Tensor", TORCH_FN(functionalization::xlogy__Tensor));
21538 m.impl("xlogy.OutScalar_Self", TORCH_FN(functionalization::xlogy_out_OutScalar_Self));
21539 m.impl("xlogy.OutScalar_Other", TORCH_FN(functionalization::xlogy_out_OutScalar_Other));
21540 m.impl("xlogy_.Scalar_Other", TORCH_FN(functionalization::xlogy__Scalar_Other));
21541 m.impl("logspace.out", TORCH_FN(functionalization::logspace_out_out));
21542 m.impl("matrix_power", static_cast<at::Tensor (*)(const at::Tensor & self, int64_t n)>(at::native::matrix_power));
21543 m.impl("matrix_power.out", static_cast<at::Tensor & (*)(const at::Tensor & self, int64_t n, at::Tensor & out)>(at::native::matrix_power_out));
21544 m.impl("_aminmax.out", TORCH_FN(functionalization::_aminmax_out_out));
21545 m.impl("_aminmax.dim_out", TORCH_FN(functionalization::_aminmax_out_dim_out));
21546 m.impl("aminmax.out", TORCH_FN(functionalization::aminmax_out_out));
21547 m.impl("_compute_linear_combination.out", TORCH_FN(functionalization::_compute_linear_combination_out_out));
21548 m.impl("_mps_max_pool2d.out", TORCH_FN(functionalization::_mps_max_pool2d_out_out));
21549 m.impl("mkldnn_max_pool3d_backward.out", TORCH_FN(functionalization::mkldnn_max_pool3d_backward_out_out));
21550 m.impl("min.dim_min", TORCH_FN(functionalization::min_out_dim_min));
21551 m.impl("min.names_dim", static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & self, at::Dimname dim, bool keepdim)>(at::native::min));
21552 m.impl("min.names_dim_min", static_cast<::std::tuple<at::Tensor &,at::Tensor &> (*)(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices)>(at::native::min_out));
21553 m.impl("mps_convolution_backward.out", TORCH_FN(functionalization::mps_convolution_backward_out_out));
21554 m.impl("miopen_rnn.out", TORCH_FN(functionalization::miopen_rnn_out_out));
21555 m.impl("mv.out", TORCH_FN(functionalization::mv_out_out));
21556 m.impl("_native_batch_norm_legit.out", TORCH_FN(functionalization::_native_batch_norm_legit_out_out));
21557 m.impl("_native_batch_norm_legit", TORCH_FN(functionalization::_native_batch_norm_legit));
21558 m.impl("_native_batch_norm_legit.no_stats_out", TORCH_FN(functionalization::_native_batch_norm_legit_out_no_stats_out));
21559 m.impl("batch_norm_stats.out", TORCH_FN(functionalization::batch_norm_stats_out_out));
21560 m.impl("batch_norm_backward_elemt.out", TORCH_FN(functionalization::batch_norm_backward_elemt_out_out));
21561 m.impl("_euclidean_dist.out", TORCH_FN(functionalization::_euclidean_dist_out_out));
21562 m.impl("_cdist_forward.out", TORCH_FN(functionalization::_cdist_forward_out_out));
21563 m.impl("_cdist_backward.out", TORCH_FN(functionalization::_cdist_backward_out_out));
21564 m.impl("pixel_unshuffle.out", TORCH_FN(functionalization::pixel_unshuffle_out_out));
21565 m.impl("rad2deg.out", TORCH_FN(functionalization::rad2deg_out_out));
21566 m.impl("rad2deg_", TORCH_FN(functionalization::rad2deg_));
21567 m.impl("scalar_tensor.out", TORCH_FN(functionalization::scalar_tensor_out_out));
21568 m.impl("rand.names_out", TORCH_FN(functionalization::rand_out_names_out));
21569 m.impl("rand.generator_with_names_out", TORCH_FN(functionalization::rand_out_generator_with_names_out));
21570 m.impl("rand.out", TORCH_FN(functionalization::rand_out_out));
21571 m.impl("rand.generator_out", static_cast<at::Tensor & (*)(at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out)>(at::native::rand_out));
21572 m.impl("rand_like.out", TORCH_FN(functionalization::rand_like_out_out));
21573 m.impl("relu.out", TORCH_FN(functionalization::relu_out_out));
21574 m.impl("relu_", TORCH_FN(functionalization::relu_));
21575 m.impl("logit.out", TORCH_FN(functionalization::logit_out_out));
21576 m.impl("logit_", TORCH_FN(functionalization::logit_));
21577 m.impl("select_scatter.out", TORCH_FN(functionalization::select_scatter_out_out));
21578 m.impl("softmax.int", static_cast<at::Tensor (*)(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype)>(at::native::softmax));
21579 m.impl("softmax.int_out", TORCH_FN(functionalization::softmax_out_int_out));
21580 m.impl("stack.out", TORCH_FN(functionalization::stack_out_out));
21581 m.impl("vstack", static_cast<at::Tensor (*)(at::TensorList tensors)>(at::native::vstack));
21582 m.impl("vstack.out", static_cast<at::Tensor & (*)(at::TensorList tensors, at::Tensor & out)>(at::native::vstack_out));
21583 m.impl("nansum.out", TORCH_FN(functionalization::nansum_out_out));
21584 m.impl("sqrt.out", TORCH_FN(functionalization::sqrt_out_out));
21585 m.impl("sqrt_", TORCH_FN(functionalization::sqrt_));
21586 m.impl("prod.out", TORCH_FN(functionalization::prod_out_out));
21587 m.impl("prod.int_out", TORCH_FN(functionalization::prod_out_int_out));
21588 m.impl("prod.dim_Dimname", static_cast<at::Tensor (*)(const at::Tensor & self, at::Dimname dim, bool keepdim, c10::optional<at::ScalarType> dtype)>(at::native::prod));
21589 m.impl("prod.Dimname_out", static_cast<at::Tensor & (*)(const at::Tensor & self, at::Dimname dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out)>(at::native::prod_out));
21590 m.impl("threshold_backward.grad_input", TORCH_FN(functionalization::threshold_backward_out_grad_input));
21591 m.impl("_transform_bias_rescale_qkv.out", TORCH_FN(functionalization::_transform_bias_rescale_qkv_out_out));
21592 m.impl("_nested_from_padded.out", TORCH_FN(functionalization::_nested_from_padded_out_out));
21593 m.impl("_nested_tensor_size.out", TORCH_FN(functionalization::_nested_tensor_size_out_out));
21594 m.impl("_nested_view_from_buffer_copy.out", TORCH_FN(functionalization::_nested_view_from_buffer_copy_out_out));
21595 m.impl("trunc.out", TORCH_FN(functionalization::trunc_out_out));
21596 m.impl("trunc_", TORCH_FN(functionalization::trunc_));
21597 m.impl("unique_dim_consecutive.out", TORCH_FN(functionalization::unique_dim_consecutive_out_out));
21598 m.impl("where.self_out", TORCH_FN(functionalization::where_out_self_out));
21599 m.impl("_weight_norm_interface_backward.out", TORCH_FN(functionalization::_weight_norm_interface_backward_out_out));
21600 m.impl("_sample_dirichlet.out", TORCH_FN(functionalization::_sample_dirichlet_out_out));
21601 m.impl("binomial.out", TORCH_FN(functionalization::binomial_out_out));
21602 m.impl("native_norm.out", TORCH_FN(functionalization::native_norm_out_out));
21603 m.impl("native_norm.ScalarOpt_dim_dtype_out", TORCH_FN(functionalization::native_norm_out_ScalarOpt_dim_dtype_out));
21604 m.impl("_sparse_sum.dim_out", TORCH_FN(functionalization::_sparse_sum_out_dim_out));
21605 m.impl("_sparse_sum_backward.out", TORCH_FN(functionalization::_sparse_sum_backward_out_out));
21606 m.impl("_sparse_softmax.out", TORCH_FN(functionalization::_sparse_softmax_out_out));
21607 m.impl("clone.out", TORCH_FN(functionalization::clone_out_out));
21608 m.impl("resize_as.out", TORCH_FN(functionalization::resize_as_out_out));
21609 m.impl("resize_as_", TORCH_FN(functionalization::resize_as_));
21610 m.impl("zero.out", TORCH_FN(functionalization::zero_out_out));
21611 m.impl("zero_", TORCH_FN(functionalization::zero_));
21612 m.impl("heaviside.out", TORCH_FN(functionalization::heaviside_out_out));
21613 m.impl("heaviside_", TORCH_FN(functionalization::heaviside_));
21614 m.impl("addmm.out", TORCH_FN(functionalization::addmm_out_out));
21615 m.impl("addmm_", TORCH_FN(functionalization::addmm_));
21616 m.impl("_sparse_coo_tensor_with_dims.out", TORCH_FN(functionalization::_sparse_coo_tensor_with_dims_out_out));
21617 m.impl("_sparse_coo_tensor_with_dims_and_tensors.out", TORCH_FN(functionalization::_sparse_coo_tensor_with_dims_and_tensors_out_out));
21618 m.impl("sparse_resize_and_clear.out", TORCH_FN(functionalization::sparse_resize_and_clear_out_out));
21619 m.impl("sparse_resize_and_clear_", TORCH_FN(functionalization::sparse_resize_and_clear_));
21620 m.impl("hspmm.out", TORCH_FN(functionalization::hspmm_out_out));
21621 m.impl("to_sparse.sparse_dim_out", TORCH_FN(functionalization::to_sparse_out_sparse_dim_out));
21622 m.impl("to_sparse.out", TORCH_FN(functionalization::to_sparse_out_out));
21623 m.impl("to_sparse_bsr.out", TORCH_FN(functionalization::to_sparse_bsr_out_out));
21624 m.impl("to_mkldnn.out", TORCH_FN(functionalization::to_mkldnn_out_out));
21625 m.impl("mkldnn_reorder_conv3d_weight.out", TORCH_FN(functionalization::mkldnn_reorder_conv3d_weight_out_out));
21626 m.impl("q_per_channel_scales.out", TORCH_FN(functionalization::q_per_channel_scales_out_out));
21627 m.impl("int_repr.out", TORCH_FN(functionalization::int_repr_out_out));
21628 m.impl("_make_per_tensor_quantized_tensor.out", TORCH_FN(functionalization::_make_per_tensor_quantized_tensor_out_out));
21629 m.impl("_thnn_fused_lstm_cell.out", TORCH_FN(functionalization::_thnn_fused_lstm_cell_out_out));
21630 m.impl("_thnn_fused_lstm_cell_backward_impl.out", TORCH_FN(functionalization::_thnn_fused_lstm_cell_backward_impl_out_out));
21631 m.impl("masked_fill.Scalar_out", TORCH_FN(functionalization::masked_fill_out_Scalar_out));
21632 m.impl("masked_fill_.Scalar", TORCH_FN(functionalization::masked_fill__Scalar));
21633 m.impl("masked_fill.Tensor_out", TORCH_FN(functionalization::masked_fill_out_Tensor_out));
21634 m.impl("masked_fill_.Tensor", TORCH_FN(functionalization::masked_fill__Tensor));
21635 m.impl("_masked_softmax.out", TORCH_FN(functionalization::_masked_softmax_out_out));
21636 m.impl("bitwise_right_shift.Tensor_out", TORCH_FN(functionalization::bitwise_right_shift_out_Tensor_out));
21637 m.impl("bitwise_right_shift_.Tensor", TORCH_FN(functionalization::bitwise_right_shift__Tensor));
21638 m.impl("bitwise_right_shift.Tensor_Scalar_out", TORCH_FN(functionalization::bitwise_right_shift_out_Tensor_Scalar_out));
21639 m.impl("bitwise_right_shift_.Tensor_Scalar", TORCH_FN(functionalization::bitwise_right_shift__Tensor_Scalar));
21640 m.impl("bitwise_right_shift.Scalar_Tensor_out", TORCH_FN(functionalization::bitwise_right_shift_out_Scalar_Tensor_out));
21641 m.impl("cauchy.out", TORCH_FN(functionalization::cauchy_out_out));
21642 m.impl("cauchy_", TORCH_FN(functionalization::cauchy_));
21643 m.impl("log_normal.out", TORCH_FN(functionalization::log_normal_out_out));
21644 m.impl("log_normal_", TORCH_FN(functionalization::log_normal_));
21645 m.impl("diag", static_cast<at::Tensor (*)(const at::Tensor & self, int64_t diagonal)>(at::native::diag));
21646 m.impl("diag.out", static_cast<at::Tensor & (*)(const at::Tensor & self, int64_t diagonal, at::Tensor & out)>(at::native::diag_out));
21647 m.impl("ne.Scalar_out", TORCH_FN(functionalization::ne_out_Scalar_out));
21648 m.impl("ne_.Scalar", TORCH_FN(functionalization::ne__Scalar));
21649 m.impl("ne.Tensor_out", TORCH_FN(functionalization::ne_out_Tensor_out));
21650 m.impl("ne_.Tensor", TORCH_FN(functionalization::ne__Tensor));
21651 m.impl("not_equal.Scalar", static_cast<at::Tensor (*)(const at::Tensor & self, const at::Scalar & other)>(at::native::not_equal));
21652 m.impl("not_equal.Scalar_out", static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Scalar & other, at::Tensor & out)>(at::native::not_equal_out));
21653 m.impl("not_equal_.Scalar", static_cast<at::Tensor & (*)(at::Tensor & self, const at::Scalar & other)>(at::native::not_equal_));
21654 m.impl("not_equal.Tensor", static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other)>(at::native::not_equal));
21655 m.impl("not_equal.Tensor_out", static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & other, at::Tensor & out)>(at::native::not_equal_out));
21656 m.impl("not_equal_.Tensor", static_cast<at::Tensor & (*)(at::Tensor & self, const at::Tensor & other)>(at::native::not_equal_));
21657 m.impl("addcmul.out", TORCH_FN(functionalization::addcmul_out_out));
21658 m.impl("addcmul_", TORCH_FN(functionalization::addcmul_));
21659 m.impl("ormqr.out", TORCH_FN(functionalization::ormqr_out_out));
21660 m.impl("lu_unpack.out", TORCH_FN(functionalization::lu_unpack_out_out));
21661 m.impl("dist.out", TORCH_FN(functionalization::dist_out_out));
21662 m.impl("arctan2", static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other)>(at::native::arctan2));
21663 m.impl("arctan2.out", static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & other, at::Tensor & out)>(at::native::arctan2_out));
21664 m.impl("arctan2_", static_cast<at::Tensor & (*)(at::Tensor & self, const at::Tensor & other)>(at::native::arctan2_));
21665 m.impl("histc.out", TORCH_FN(functionalization::histc_out_out));
21666 m.impl("_histogramdd_from_bin_cts.out", TORCH_FN(functionalization::_histogramdd_from_bin_cts_out_out));
21667 m.impl("hypot.out", TORCH_FN(functionalization::hypot_out_out));
21668 m.impl("hypot_", TORCH_FN(functionalization::hypot_));
21669 m.impl("min.other", static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other)>(at::native::min));
21670 m.impl("min.out", static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & other, at::Tensor & out)>(at::native::min_out));
21671 m.impl("msort", static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::msort));
21672 m.impl("msort.out", static_cast<at::Tensor & (*)(const at::Tensor & self, at::Tensor & out)>(at::native::msort_out));
21673 m.impl("_foreach_add.Scalar_out", TORCH_FN(functionalization::_foreach_add_out_Scalar_out));
21674 m.impl("_foreach_add_.Scalar", TORCH_FN(functionalization::_foreach_add__Scalar));
21675 m.impl("_foreach_clamp_min.Scalar_out", TORCH_FN(functionalization::_foreach_clamp_min_out_Scalar_out));
21676 m.impl("_foreach_clamp_min_.Scalar", TORCH_FN(functionalization::_foreach_clamp_min__Scalar));
21677 m.impl("_foreach_add.List_out", TORCH_FN(functionalization::_foreach_add_out_List_out));
21678 m.impl("_foreach_add_.List", TORCH_FN(functionalization::_foreach_add__List));
21679 m.impl("_foreach_clamp_min.List_out", TORCH_FN(functionalization::_foreach_clamp_min_out_List_out));
21680 m.impl("_foreach_clamp_min_.List", TORCH_FN(functionalization::_foreach_clamp_min__List));
21681 m.impl("_foreach_add.ScalarList_out", TORCH_FN(functionalization::_foreach_add_out_ScalarList_out));
21682 m.impl("_foreach_add_.ScalarList", TORCH_FN(functionalization::_foreach_add__ScalarList));
21683 m.impl("_foreach_clamp_min.ScalarList_out", TORCH_FN(functionalization::_foreach_clamp_min_out_ScalarList_out));
21684 m.impl("_foreach_clamp_min_.ScalarList", TORCH_FN(functionalization::_foreach_clamp_min__ScalarList));
21685 m.impl("_foreach_zero.out", TORCH_FN(functionalization::_foreach_zero_out_out));
21686 m.impl("_foreach_zero_", TORCH_FN(functionalization::_foreach_zero_));
21687 m.impl("_foreach_asin.out", TORCH_FN(functionalization::_foreach_asin_out_out));
21688 m.impl("_foreach_asin_", TORCH_FN(functionalization::_foreach_asin_));
21689 m.impl("_foreach_ceil.out", TORCH_FN(functionalization::_foreach_ceil_out_out));
21690 m.impl("_foreach_ceil_", TORCH_FN(functionalization::_foreach_ceil_));
21691 m.impl("_foreach_cosh.out", TORCH_FN(functionalization::_foreach_cosh_out_out));
21692 m.impl("_foreach_cosh_", TORCH_FN(functionalization::_foreach_cosh_));
21693 m.impl("_foreach_log1p.out", TORCH_FN(functionalization::_foreach_log1p_out_out));
21694 m.impl("_foreach_log1p_", TORCH_FN(functionalization::_foreach_log1p_));
21695 m.impl("_foreach_log2.out", TORCH_FN(functionalization::_foreach_log2_out_out));
21696 m.impl("_foreach_log2_", TORCH_FN(functionalization::_foreach_log2_));
21697 m.impl("_foreach_round.out", TORCH_FN(functionalization::_foreach_round_out_out));
21698 m.impl("_foreach_round_", TORCH_FN(functionalization::_foreach_round_));
21699 m.impl("_foreach_addcdiv.Scalar_out", TORCH_FN(functionalization::_foreach_addcdiv_out_Scalar_out));
21700 m.impl("_foreach_addcdiv_.Scalar", TORCH_FN(functionalization::_foreach_addcdiv__Scalar));
21701 m.impl("_foreach_addcmul.Scalar_out", TORCH_FN(functionalization::_foreach_addcmul_out_Scalar_out));
21702 m.impl("_foreach_addcmul_.Scalar", TORCH_FN(functionalization::_foreach_addcmul__Scalar));
21703 m.impl("_foreach_addcdiv.ScalarList_out", TORCH_FN(functionalization::_foreach_addcdiv_out_ScalarList_out));
21704 m.impl("_foreach_addcdiv_.ScalarList", TORCH_FN(functionalization::_foreach_addcdiv__ScalarList));
21705 m.impl("_foreach_addcdiv.Tensor_out", TORCH_FN(functionalization::_foreach_addcdiv_out_Tensor_out));
21706 m.impl("_foreach_addcdiv_.Tensor", TORCH_FN(functionalization::_foreach_addcdiv__Tensor));
21707 m.impl("_foreach_addcmul.ScalarList_out", TORCH_FN(functionalization::_foreach_addcmul_out_ScalarList_out));
21708 m.impl("_foreach_addcmul_.ScalarList", TORCH_FN(functionalization::_foreach_addcmul__ScalarList));
21709 m.impl("_foreach_addcmul.Tensor_out", TORCH_FN(functionalization::_foreach_addcmul_out_Tensor_out));
21710 m.impl("_foreach_addcmul_.Tensor", TORCH_FN(functionalization::_foreach_addcmul__Tensor));
21711 m.impl("bucketize.Tensor_out", TORCH_FN(functionalization::bucketize_out_Tensor_out));
21712 m.impl("bucketize.Scalar_out", TORCH_FN(functionalization::bucketize_out_Scalar_out));
21713 m.impl("mse_loss.out", TORCH_FN(functionalization::mse_loss_out_out));
21714 m.impl("multi_margin_loss.out", TORCH_FN(functionalization::multi_margin_loss_out_out));
21715 m.impl("multilabel_margin_loss", static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & target, int64_t reduction)>(at::native::multilabel_margin_loss));
21716 m.impl("multilabel_margin_loss.out", static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out)>(at::native::multilabel_margin_loss_out));
21717 m.impl("multilabel_margin_loss_forward.output", TORCH_FN(functionalization::multilabel_margin_loss_forward_out_output));
21718 m.impl("nll_loss2d_forward.output", TORCH_FN(functionalization::nll_loss2d_forward_out_output));
21719 m.impl("soft_margin_loss.out", TORCH_FN(functionalization::soft_margin_loss_out_out));
21720 m.impl("glu_backward.grad_input", TORCH_FN(functionalization::glu_backward_out_grad_input));
21721 m.impl("glu_backward_jvp.out", TORCH_FN(functionalization::glu_backward_jvp_out_out));
21722 m.impl("hardtanh.out", TORCH_FN(functionalization::hardtanh_out_out));
21723 m.impl("hardtanh_", TORCH_FN(functionalization::hardtanh_));
21724 m.impl("hardtanh_backward.grad_input", TORCH_FN(functionalization::hardtanh_backward_out_grad_input));
21725 m.impl("leaky_relu.out", TORCH_FN(functionalization::leaky_relu_out_out));
21726 m.impl("leaky_relu_", TORCH_FN(functionalization::leaky_relu_));
21727 m.impl("log_sigmoid_backward.grad_input", TORCH_FN(functionalization::log_sigmoid_backward_out_grad_input));
21728 m.impl("softplus.out", TORCH_FN(functionalization::softplus_out_out));
21729 m.impl("adaptive_avg_pool2d", static_cast<at::Tensor (*)(const at::Tensor & self, c10::SymIntArrayRef output_size)>(at::native::adaptive_avg_pool2d_symint));
21730 m.impl("adaptive_avg_pool2d.out", TORCH_FN(functionalization::adaptive_avg_pool2d_out_out));
21731 m.impl("_adaptive_avg_pool3d_backward.out", TORCH_FN(functionalization::_adaptive_avg_pool3d_backward_out_out));
21732 m.impl("adaptive_max_pool2d_backward.grad_input", TORCH_FN(functionalization::adaptive_max_pool2d_backward_out_grad_input));
21733 m.impl("fractional_max_pool2d_backward.grad_input", TORCH_FN(functionalization::fractional_max_pool2d_backward_out_grad_input));
21734 m.impl("fractional_max_pool3d_backward.grad_input", TORCH_FN(functionalization::fractional_max_pool3d_backward_out_grad_input));
21735 m.impl("max_pool3d_with_indices.out", TORCH_FN(functionalization::max_pool3d_with_indices_out_out));
21736 m.impl("max_pool3d_with_indices_backward.grad_input", TORCH_FN(functionalization::max_pool3d_with_indices_backward_out_grad_input));
21737 m.impl("max_unpool2d.out", TORCH_FN(functionalization::max_unpool2d_out_out));
21738 m.impl("reflection_pad2d_backward.grad_input", TORCH_FN(functionalization::reflection_pad2d_backward_out_grad_input));
21739 m.impl("upsample_bilinear2d.out", TORCH_FN(functionalization::upsample_bilinear2d_out_out));
21740 m.impl("upsample_bilinear2d_backward.grad_input", TORCH_FN(functionalization::upsample_bilinear2d_backward_out_grad_input));
21741 m.impl("_upsample_bilinear2d_aa.out", TORCH_FN(functionalization::_upsample_bilinear2d_aa_out_out));
21742 m.impl("upsample_trilinear3d_backward.grad_input", TORCH_FN(functionalization::upsample_trilinear3d_backward_out_grad_input));
21743 m.impl("_upsample_nearest_exact3d.out", TORCH_FN(functionalization::_upsample_nearest_exact3d_out_out));
21744 m.impl("upsample_nearest3d_backward.grad_input", TORCH_FN(functionalization::upsample_nearest3d_backward_out_grad_input));
21745 m.impl("logit_backward.grad_input", TORCH_FN(functionalization::logit_backward_out_grad_input));
21746 m.impl("thnn_conv2d", static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding)>(at::native::thnn_conv2d));
21747 m.impl("thnn_conv2d.out", static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out)>(at::native::thnn_conv2d_out));
21748 m.impl("_slow_conv2d_backward.output_mask_out", TORCH_FN(functionalization::_slow_conv2d_backward_out_output_mask_out));
21749 m.impl("slow_conv3d", static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding)>(at::native::slow_conv3d));
21750 m.impl("slow_conv3d.out", static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out)>(at::native::slow_conv3d_out));
21751 m.impl("slow_conv3d_forward.output", TORCH_FN(functionalization::slow_conv3d_forward_out_output));
21752 m.impl("slow_conv_dilated3d.out", TORCH_FN(functionalization::slow_conv_dilated3d_out_out));
21753 m.impl("special_log_ndtr.out", TORCH_FN(functionalization::special_log_ndtr_out_out));
21754 m.impl("special_exp2", static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::special_exp2));
21755 m.impl("special_exp2.out", static_cast<at::Tensor & (*)(const at::Tensor & self, at::Tensor & out)>(at::native::special_exp2_out));
21756 m.impl("special_digamma", static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::special_digamma));
21757 m.impl("special_digamma.out", static_cast<at::Tensor & (*)(const at::Tensor & self, at::Tensor & out)>(at::native::special_digamma_out));
21758 m.impl("special_gammaln", static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::special_gammaln));
21759 m.impl("special_gammaln.out", static_cast<at::Tensor & (*)(const at::Tensor & self, at::Tensor & out)>(at::native::special_gammaln_out));
21760 m.impl("special_erfcx.out", TORCH_FN(functionalization::special_erfcx_out_out));
21761 m.impl("special_xlog1py.out", TORCH_FN(functionalization::special_xlog1py_out_out));
21762 m.impl("special_xlog1py.self_scalar_out", TORCH_FN(functionalization::special_xlog1py_out_self_scalar_out));
21763 m.impl("special_xlog1py.other_scalar_out", TORCH_FN(functionalization::special_xlog1py_out_other_scalar_out));
21764 m.impl("special_i1.out", TORCH_FN(functionalization::special_i1_out_out));
21765 m.impl("special_i1e.out", TORCH_FN(functionalization::special_i1e_out_out));
21766 m.impl("fft_fft", static_cast<at::Tensor (*)(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm)>(at::native::fft_fft));
21767 m.impl("fft_fft.out", static_cast<at::Tensor & (*)(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out)>(at::native::fft_fft_out));
21768 m.impl("fft_rfft", static_cast<at::Tensor (*)(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm)>(at::native::fft_rfft));
21769 m.impl("fft_rfft.out", static_cast<at::Tensor & (*)(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out)>(at::native::fft_rfft_out));
21770 m.impl("fft_hfft", static_cast<at::Tensor (*)(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm)>(at::native::fft_hfft));
21771 m.impl("fft_hfft.out", static_cast<at::Tensor & (*)(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out)>(at::native::fft_hfft_out));
21772 m.impl("fft_hfft2", static_cast<at::Tensor (*)(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm)>(at::native::fft_hfft2));
21773 m.impl("fft_hfft2.out", static_cast<const at::Tensor & (*)(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out)>(at::native::fft_hfft2_out));
21774 m.impl("fft_ifftn", static_cast<at::Tensor (*)(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm)>(at::native::fft_ifftn));
21775 m.impl("fft_ifftn.out", static_cast<at::Tensor & (*)(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out)>(at::native::fft_ifftn_out));
21776 m.impl("fft_rfftn", static_cast<at::Tensor (*)(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm)>(at::native::fft_rfftn));
21777 m.impl("fft_rfftn.out", static_cast<at::Tensor & (*)(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out)>(at::native::fft_rfftn_out));
21778 m.impl("fft_hfftn", static_cast<at::Tensor (*)(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm)>(at::native::fft_hfftn));
21779 m.impl("fft_hfftn.out", static_cast<const at::Tensor & (*)(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out)>(at::native::fft_hfftn_out));
21780 m.impl("fft_fftfreq.out", TORCH_FN(functionalization::fft_fftfreq_out_out));
21781 m.impl("linalg_det", static_cast<at::Tensor (*)(const at::Tensor & A)>(at::native::linalg_det));
21782 m.impl("linalg_det.out", static_cast<at::Tensor & (*)(const at::Tensor & A, at::Tensor & out)>(at::native::linalg_det_out));
21783 m.impl("linalg_ldl_factor_ex.out", TORCH_FN(functionalization::linalg_ldl_factor_ex_out_out));
21784 m.impl("linalg_lstsq.out", TORCH_FN(functionalization::linalg_lstsq_out_out));
21785 m.impl("linalg_matrix_exp.out", TORCH_FN(functionalization::linalg_matrix_exp_out_out));
21786 m.impl("linalg_slogdet", static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & A)>(at::native::linalg_slogdet));
21787 m.impl("linalg_slogdet.out", static_cast<::std::tuple<at::Tensor &,at::Tensor &> (*)(const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet)>(at::native::linalg_slogdet_out));
21788 m.impl("_linalg_eigh.eigenvalues", TORCH_FN(functionalization::_linalg_eigh_out_eigenvalues));
21789 m.impl("inner", static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other)>(at::native::inner));
21790 m.impl("inner.out", static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & other, at::Tensor & out)>(at::native::inner_out));
21791 m.impl("linalg_matrix_norm", static_cast<at::Tensor (*)(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype)>(at::native::linalg_matrix_norm));
21792 m.impl("linalg_matrix_norm.out", static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out)>(at::native::linalg_matrix_norm_out));
21793 m.impl("linalg_matrix_norm.str_ord", static_cast<at::Tensor (*)(const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype)>(at::native::linalg_matrix_norm));
21794 m.impl("linalg_matrix_norm.str_ord_out", static_cast<at::Tensor & (*)(const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out)>(at::native::linalg_matrix_norm_out));
21795 m.impl("linalg_tensorinv", static_cast<at::Tensor (*)(const at::Tensor & self, int64_t ind)>(at::native::linalg_tensorinv));
21796 m.impl("linalg_tensorinv.out", static_cast<at::Tensor & (*)(const at::Tensor & self, int64_t ind, at::Tensor & out)>(at::native::linalg_tensorinv_out));
21797 m.impl("linalg_matrix_power", static_cast<at::Tensor (*)(const at::Tensor & self, int64_t n)>(at::native::linalg_matrix_power));
21798 m.impl("linalg_matrix_power.out", static_cast<at::Tensor & (*)(const at::Tensor & self, int64_t n, at::Tensor & out)>(at::native::linalg_matrix_power_out));
21799 m.impl("_make_dual_copy.out", TORCH_FN(functionalization::_make_dual_copy_out_out));
21800 m.impl("view_as_real_copy.out", TORCH_FN(functionalization::view_as_real_copy_out_out));
21801 m.impl("view_as_complex_copy.out", TORCH_FN(functionalization::view_as_complex_copy_out_out));
21802 m.impl("_conj_copy.out", TORCH_FN(functionalization::_conj_copy_out_out));
21803 m.impl("_neg_view_copy.out", TORCH_FN(functionalization::_neg_view_copy_out_out));
21804 m.impl("permute_copy.out", TORCH_FN(functionalization::permute_copy_out_out));
21805 m.impl("split_copy.Tensor_out", TORCH_FN(functionalization::split_copy_out_Tensor_out));
21806 m.impl("t_copy.out", TORCH_FN(functionalization::t_copy_out_out));
21807 m.impl("ccol_indices_copy.out", TORCH_FN(functionalization::ccol_indices_copy_out_out));
21808 m.impl("unfold_copy.out", TORCH_FN(functionalization::unfold_copy_out_out));
21809 m.impl("_transformer_encoder_layer_fwd.out", TORCH_FN(functionalization::_transformer_encoder_layer_fwd_out_out));
21810 m.impl("_native_multi_head_attention.out", TORCH_FN(functionalization::_native_multi_head_attention_out_out));
21811 m.impl("_transformer_decoder_only_layer_fwd.out", TORCH_FN(functionalization::_transformer_decoder_only_layer_fwd_out_out));
21812 m.impl("_native_decoder_only_multi_head_attention.out", TORCH_FN(functionalization::_native_decoder_only_multi_head_attention_out_out));
21813 m.impl("special_modified_bessel_i1.out", TORCH_FN(functionalization::special_modified_bessel_i1_out_out));
21814 m.impl("special_modified_bessel_k0.out", TORCH_FN(functionalization::special_modified_bessel_k0_out_out));
21815 m.impl("_foobar.out", TORCH_FN(functionalization::_foobar_out_out));
21816 m.impl("view_as_real", TORCH_FN(functionalization::view_as_real));
21817 m.impl("broadcast_to", static_cast<at::Tensor (*)(const at::Tensor & self, c10::SymIntArrayRef size)>(at::native::broadcast_to_symint));
21818 m.impl("contiguous", static_cast<at::Tensor (*)(const at::Tensor & self, at::MemoryFormat memory_format)>(at::native::contiguous));
21819 m.impl("flatten.using_ints", static_cast<at::Tensor (*)(const at::Tensor & self, int64_t start_dim, int64_t end_dim)>(at::native::flatten));
21820 m.impl("flatten.named_out_dim", static_cast<at::Tensor (*)(const at::Tensor & self, int64_t start_dim, int64_t end_dim, at::Dimname out_dim)>(at::native::flatten));
21821 m.impl("flatten.using_names", static_cast<at::Tensor (*)(const at::Tensor & self, at::Dimname start_dim, at::Dimname end_dim, at::Dimname out_dim)>(at::native::flatten));
21822 m.impl("flatten.DimnameList", static_cast<at::Tensor (*)(const at::Tensor & self, at::DimnameList dims, at::Dimname out_dim)>(at::native::flatten));
21823 m.impl("moveaxis.intlist", static_cast<at::Tensor (*)(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination)>(at::native::moveaxis));
21824 m.impl("moveaxis.int", static_cast<at::Tensor (*)(const at::Tensor & self, int64_t source, int64_t destination)>(at::native::moveaxis));
21825 m.impl("mT", static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::mT));
21826 m.impl("ravel", static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::ravel));
21827 m.impl("reshape", static_cast<at::Tensor (*)(const at::Tensor & self, c10::SymIntArrayRef shape)>(at::native::reshape_symint));
21828 m.impl("slice.Tensor", TORCH_FN(functionalization::slice_Tensor));
21829 m.impl("hsplit.int", static_cast<::std::vector<at::Tensor> (*)(const at::Tensor & self, int64_t sections)>(at::native::hsplit));
21830 m.impl("hsplit.array", static_cast<::std::vector<at::Tensor> (*)(const at::Tensor & self, at::IntArrayRef indices)>(at::native::hsplit));
21831 m.impl("t", TORCH_FN(functionalization::t));
21832 m.impl("t_", TORCH_FN(functionalization::t_));
21833 m.impl("coalesce", static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::coalesce));
21834 m.impl("_indices", TORCH_FN(functionalization::_indices));
21835 m.impl("indices", TORCH_FN(functionalization::indices));
21836 m.impl("crow_indices", TORCH_FN(functionalization::crow_indices));
21837 m.impl("col_indices", TORCH_FN(functionalization::col_indices));
21838 m.impl("_autocast_to_reduced_precision", static_cast<at::Tensor (*)(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled, at::ScalarType cuda_dtype, at::ScalarType cpu_dtype)>(at::native::_autocast_to_reduced_precision));
21839 m.impl("swapaxes", static_cast<at::Tensor (*)(const at::Tensor & self, int64_t axis0, int64_t axis1)>(at::native::swapaxes));
21840 m.impl("swapaxes_", static_cast<at::Tensor & (*)(at::Tensor & self, int64_t axis0, int64_t axis1)>(at::native::swapaxes_));
21841 m.impl("unfold", TORCH_FN(functionalization::unfold));
21842 m.impl("_cast_Byte", static_cast<at::Tensor (*)(const at::Tensor & self, bool non_blocking)>(at::native::_cast_Byte));
21843 m.impl("_cast_Float", static_cast<at::Tensor (*)(const at::Tensor & self, bool non_blocking)>(at::native::_cast_Float));
21844 m.impl("is_leaf", static_cast<bool (*)(const at::Tensor & self)>(at::native::is_leaf));
21845 m.impl("_reshape_from_tensor", static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & shape)>(at::native::_reshape_from_tensor));
21846 m.impl("dropout", static_cast<at::Tensor (*)(const at::Tensor & input, double p, bool train)>(at::native::dropout));
21847 m.impl("dropout_", static_cast<at::Tensor & (*)(at::Tensor & self, double p, bool train)>(at::native::dropout_));
21848 m.impl("adaptive_max_pool1d", static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & self, at::IntArrayRef output_size)>(at::native::adaptive_max_pool1d));
21849 m.impl("atleast_2d", static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::atleast_2d));
21850 m.impl("atleast_2d.Sequence", static_cast<::std::vector<at::Tensor> (*)(at::TensorList tensors)>(at::native::atleast_2d));
21851 m.impl("_batch_norm_impl_index_backward", static_cast<::std::tuple<at::Tensor,at::Tensor,at::Tensor> (*)(int64_t impl_index, const at::Tensor & input, const at::Tensor & grad_output, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var_transform, bool train, double eps, ::std::array<bool,3> output_mask, const at::Tensor & reservedSpace)>(at::native::_batch_norm_impl_index_backward));
21852 m.impl("_convolution_double_backward", static_cast<::std::tuple<at::Tensor,at::Tensor,at::Tensor> (*)(const c10::optional<at::Tensor> & ggI, const c10::optional<at::Tensor> & ggW, const c10::optional<at::Tensor> & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask)>(at::native::_convolution_double_backward));
21853 m.impl("conv1d", static_cast<at::Tensor (*)(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups)>(at::native::conv1d));
21854 m.impl("conv1d.padding", static_cast<at::Tensor (*)(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups)>(at::native::conv1d));
21855 m.impl("corrcoef", static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::corrcoef));
21856 m.impl("cummaxmin_backward", static_cast<at::Tensor (*)(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & indices, int64_t dim)>(at::native::cummaxmin_backward));
21857 m.impl("cumprod_backward", static_cast<at::Tensor (*)(const at::Tensor & grad, const at::Tensor & input, int64_t dim, const at::Tensor & output)>(at::native::cumprod_backward));
21858 m.impl("embedding_sparse_backward", static_cast<at::Tensor (*)(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq)>(at::native::embedding_sparse_backward));
21859 m.impl("_rowwise_prune", static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & weight, const at::Tensor & mask, at::ScalarType compressed_indices_dtype)>(at::native::_rowwise_prune));
21860 m.impl("_embedding_bag_sparse_backward", static_cast<at::Tensor (*)(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx)>(at::native::_embedding_bag_sparse_backward_symint));
21861 m.impl("floor_divide.Scalar", static_cast<at::Tensor (*)(const at::Tensor & self, const at::Scalar & other)>(at::native::floor_divide));
21862 m.impl("floor_divide_.Scalar", static_cast<at::Tensor & (*)(at::Tensor & self, const at::Scalar & other)>(at::native::floor_divide_));
21863 m.impl("group_norm", static_cast<at::Tensor (*)(const at::Tensor & input, int64_t num_groups, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, bool cudnn_enabled)>(at::native::group_norm));
21864 m.impl("is_distributed", static_cast<bool (*)(const at::Tensor & self)>(at::native::is_distributed));
21865 m.impl("is_neg", static_cast<bool (*)(const at::Tensor & self)>(at::native::is_neg));
21866 m.impl("is_signed", static_cast<bool (*)(const at::Tensor & self)>(at::native::is_signed));
21867 m.impl("fbgemm_linear_fp16_weight_fp32_activation", static_cast<at::Tensor (*)(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias)>(at::native::fbgemm_linear_fp16_weight_fp32_activation));
21868 m.impl("matrix_exp_backward", static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & grad)>(at::native::matrix_exp_backward));
21869 m.impl("max_pool2d", static_cast<at::Tensor (*)(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode)>(at::native::max_pool2d));
21870 m.impl("cosine_similarity", static_cast<at::Tensor (*)(const at::Tensor & x1, const at::Tensor & x2, int64_t dim, double eps)>(at::native::cosine_similarity));
21871 m.impl("native_channel_shuffle", static_cast<at::Tensor (*)(const at::Tensor & self, int64_t groups)>(at::native::math_channel_shuffle));
21872 m.impl("pinverse", static_cast<at::Tensor (*)(const at::Tensor & self, double rcond)>(at::native::pinverse));
21873 m.impl("rrelu", static_cast<at::Tensor (*)(const at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator)>(at::native::rrelu));
21874 m.impl("rrelu_", static_cast<at::Tensor & (*)(at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator)>(at::native::rrelu_));
21875 m.impl("prelu", static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & weight)>(at::native::prelu));
21876 m.impl("softmax.Dimname", static_cast<at::Tensor (*)(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype)>(at::native::softmax));
21877 m.impl("stft", static_cast<at::Tensor (*)(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool normalized, c10::optional<bool> onesided, c10::optional<bool> return_complex)>(at::native::stft));
21878 m.impl("stft.center", static_cast<at::Tensor (*)(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool center, c10::string_view pad_mode, bool normalized, c10::optional<bool> onesided, c10::optional<bool> return_complex)>(at::native::stft));
21879 m.impl("tile", static_cast<at::Tensor (*)(const at::Tensor & self, at::IntArrayRef dims)>(at::native::tile));
21880 m.impl("trapezoid.x", static_cast<at::Tensor (*)(const at::Tensor & y, const at::Tensor & x, int64_t dim)>(at::native::trapezoid));
21881 m.impl("trapezoid.dx", static_cast<at::Tensor (*)(const at::Tensor & y, const at::Scalar & dx, int64_t dim)>(at::native::trapezoid));
21882 m.impl("trapz.x", static_cast<at::Tensor (*)(const at::Tensor & y, const at::Tensor & x, int64_t dim)>(at::native::trapz));
21883 m.impl("trapz.dx", static_cast<at::Tensor (*)(const at::Tensor & y, double dx, int64_t dim)>(at::native::trapz));
21884 m.impl("_has_compatible_shallow_copy_type", static_cast<bool (*)(const at::Tensor & self, const at::Tensor & from)>(at::native::_has_compatible_shallow_copy_type));
21885 m.impl("vander", static_cast<at::Tensor (*)(const at::Tensor & x, c10::optional<int64_t> N, bool increasing)>(at::native::vander));
21886 m.impl("where.ScalarSelf", static_cast<at::Tensor (*)(const at::Tensor & condition, const at::Scalar & self, const at::Tensor & other)>(at::native::where));
21887 m.impl("where.ScalarOther", static_cast<at::Tensor (*)(const at::Tensor & condition, const at::Tensor & self, const at::Scalar & other)>(at::native::where));
21888 m.impl("where.Scalar", static_cast<at::Tensor (*)(const at::Tensor & condition, const at::Scalar & self, const at::Scalar & other)>(at::native::where));
21889 m.impl("where", static_cast<::std::vector<at::Tensor> (*)(const at::Tensor & condition)>(at::native::where));
21890 m.impl("_weight_norm", static_cast<at::Tensor (*)(const at::Tensor & v, const at::Tensor & g, int64_t dim)>(at::native::_weight_norm));
21891 m.impl("_weight_norm_differentiable_backward", static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim)>(at::native::_weight_norm_differentiable_backward));
21892 m.impl("_sparse_sum", static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::_sparse_sum));
21893 m.impl("_sparse_sum.dtype", static_cast<at::Tensor (*)(const at::Tensor & self, at::ScalarType dtype)>(at::native::_sparse_sum));
21894 m.impl("_sparse_sum.dim_dtype", static_cast<at::Tensor (*)(const at::Tensor & self, at::IntArrayRef dim, at::ScalarType dtype)>(at::native::_sparse_sum));
21895 m.impl("_sparse_softmax.int", static_cast<at::Tensor (*)(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype)>(at::native::_sparse_softmax));
21896 m.impl("_sparse_softmax.Dimname", static_cast<at::Tensor (*)(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype)>(at::native::_sparse_softmax));
21897 m.impl("sparse_csr_tensor.crow_col_value_size", static_cast<at::Tensor (*)(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory)>(at::native::sparse_csr_tensor));
21898 m.impl("sparse_bsc_tensor.ccol_row_value_size", static_cast<at::Tensor (*)(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory)>(at::native::sparse_bsc_tensor));
21899 m.impl("sparse_csr_tensor.crow_col_value", static_cast<at::Tensor (*)(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory)>(at::native::sparse_csr_tensor));
21900 m.impl("sparse_bsc_tensor.ccol_row_value", static_cast<at::Tensor (*)(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory)>(at::native::sparse_bsc_tensor));
21901 m.impl("_sparse_csr_tensor_unsafe", static_cast<at::Tensor (*)(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory)>(at::native::_sparse_csr_tensor_unsafe));
21902 m.impl("_sparse_bsc_tensor_unsafe", static_cast<at::Tensor (*)(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory)>(at::native::_sparse_bsc_tensor_unsafe));
21903 m.impl("_validate_sparse_bsr_tensor_args", static_cast<void (*)(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size)>(at::native::_validate_sparse_bsr_tensor_args));
21904 m.impl("_saturate_weight_to_fp16", static_cast<at::Tensor (*)(const at::Tensor & weight)>(at::native::_saturate_weight_to_fp16));
21905 m.impl("item", static_cast<at::Scalar (*)(const at::Tensor & self)>(at::native::item));
21906 m.impl("_thnn_fused_lstm_cell_backward", static_cast<::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> (*)(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias)>(at::native::_thnn_fused_lstm_cell_backward));
21907 m.impl("_thnn_differentiable_lstm_cell_backward", static_cast<::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> (*)(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, const at::Tensor & cx, const at::Tensor & cy)>(at::native::_thnn_differentiable_lstm_cell_backward));
21908 m.impl("rnn_relu.input", static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first)>(at::native::rnn_relu));
21909 m.impl("rnn_relu.data", static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional)>(at::native::rnn_relu));
21910 m.impl("quantized_lstm_cell", static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh)>(at::native::quantized_lstm_cell));
21911 m.impl("quantized_rnn_relu_cell", static_cast<at::Tensor (*)(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh)>(at::native::quantized_rnn_relu_cell));
21912 m.impl("_pack_padded_sequence_backward", static_cast<at::Tensor (*)(const at::Tensor & grad, c10::SymIntArrayRef input_size, const at::Tensor & batch_sizes, bool batch_first)>(at::native::_pack_padded_sequence_backward_symint));
21913 m.impl("histogramdd", static_cast<::std::tuple<at::Tensor,::std::vector<at::Tensor>> (*)(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density)>(at::native::histogramdd));
21914 m.impl("histogramdd.int_bins", static_cast<::std::tuple<at::Tensor,::std::vector<at::Tensor>> (*)(const at::Tensor & self, int64_t bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density)>(at::native::histogramdd));
21915 m.impl("histogramdd.TensorList_bins", static_cast<::std::tuple<at::Tensor,::std::vector<at::Tensor>> (*)(const at::Tensor & self, at::TensorList bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density)>(at::native::histogramdd));
21916 m.impl("l1_loss", static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & target, int64_t reduction)>(at::native::l1_loss));
21917 m.impl("upsample_bilinear2d.vec", static_cast<at::Tensor (*)(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors)>(at::native::upsample_bilinear2d));
21918 m.impl("_upsample_bilinear2d_aa.vec", static_cast<at::Tensor (*)(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors)>(at::native::_upsample_bilinear2d_aa));
21919 m.impl("_upsample_nearest_exact3d.vec", static_cast<at::Tensor (*)(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors)>(at::native::_upsample_nearest_exact3d));
21920 m.impl("isfinite", static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::isfinite));
21921 m.impl("special_log_softmax", static_cast<at::Tensor (*)(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype)>(at::native::special_log_softmax));
21922 m.impl("scaled_dot_product_attention", static_cast<at::Tensor (*)(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal)>(at::native::scaled_dot_product_attention));
21923 m.impl("_scaled_dot_product_attention_math", static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal, const c10::optional<at::Tensor> & dropout_mask)>(at::native::_scaled_dot_product_attention_math));;
21924}
21925
21926} // namespace
21927
21928} // namespace at
21929