1// required for old g++ to compile PRId64 macros, see
2// https://github.com/pytorch/pytorch/issues/3571
3// for context
4#ifndef __STDC_FORMAT_MACROS
5#define __STDC_FORMAT_MACROS
6#endif
7
8// an external backend might generate file within its code tree
9// and check all the source files within the tree with clang-format.
10// so, disable it since the backend might have a different config.
11// clang-format off
12
13// NOTE: This condition is true for all PyTorch internal libraries, it
14// just excludes external projects such as torch_xla which
15// re-use some of the PyTorch codegen machinery.
16#if defined(CAFFE2_BUILD_MAIN_LIB) || \
17 defined(TORCH_CUDA_BUILD_MAIN_LIB) || \
18 defined(TORCH_HIP_BUILD_MAIN_LIB) || \
19 defined(TORCH_CUDA_CU_BUILD_MAIN_LIB) || \
20 defined(TORCH_CUDA_CPP_BUILD_MAIN_LIB)
21#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
22#endif
23
24// @generated by torchgen/gen.py from RegisterDispatchKey.cpp
25
26#include <c10/core/TensorImpl.h>
27#include <c10/core/Allocator.h>
28#include <ATen/DeviceGuard.h>
29#include <ATen/NamedTensorUtils.h>
30#include <ATen/Utils.h>
31#include <ATen/WrapDimUtils.h>
32#include <ATen/Dispatch.h>
33#include <c10/util/ExclusivelyOwned.h>
34#include <c10/util/Half.h>
35#include <c10/core/UndefinedTensorImpl.h>
36#include <c10/util/Optional.h>
37#include <ATen/Tensor.h>
38#include <ATen/native/Resize.h>
39
40#include <cstddef>
41#include <functional>
42#include <memory>
43#include <utility>
44
45#include <ATen/Config.h>
46#include <ATen/core/op_registration/adaption.h>
47#include <torch/library.h>
48
49
50#include <ATen/ops/as_strided_native.h>
51#include <ATen/EmptyTensor.h>
52#include <ATen/ops/_adaptive_avg_pool2d_backward_cpu_dispatch.h>
53#include <ATen/ops/_adaptive_avg_pool2d_backward_native.h>
54#include <ATen/ops/_adaptive_avg_pool2d_cpu_dispatch.h>
55#include <ATen/ops/_adaptive_avg_pool2d_native.h>
56#include <ATen/ops/_adaptive_avg_pool3d_backward_cpu_dispatch.h>
57#include <ATen/ops/_adaptive_avg_pool3d_backward_native.h>
58#include <ATen/ops/_adaptive_avg_pool3d_cpu_dispatch.h>
59#include <ATen/ops/_adaptive_avg_pool3d_native.h>
60#include <ATen/ops/_add_relu_cpu_dispatch.h>
61#include <ATen/ops/_add_relu_native.h>
62#include <ATen/ops/_addmm_activation_cpu_dispatch.h>
63#include <ATen/ops/_addmm_activation_native.h>
64#include <ATen/ops/_aminmax_cpu_dispatch.h>
65#include <ATen/ops/_aminmax_native.h>
66#include <ATen/ops/_assert_async_cpu_dispatch.h>
67#include <ATen/ops/_assert_async_native.h>
68#include <ATen/ops/_cdist_backward_cpu_dispatch.h>
69#include <ATen/ops/_cdist_backward_native.h>
70#include <ATen/ops/_cdist_forward_cpu_dispatch.h>
71#include <ATen/ops/_cdist_forward_native.h>
72#include <ATen/ops/_cholesky_solve_helper_cpu_dispatch.h>
73#include <ATen/ops/_cholesky_solve_helper_native.h>
74#include <ATen/ops/_compute_linear_combination_cpu_dispatch.h>
75#include <ATen/ops/_compute_linear_combination_native.h>
76#include <ATen/ops/_convert_indices_from_coo_to_csr_cpu_dispatch.h>
77#include <ATen/ops/_convert_indices_from_coo_to_csr_native.h>
78#include <ATen/ops/_convert_indices_from_csr_to_coo_cpu_dispatch.h>
79#include <ATen/ops/_convert_indices_from_csr_to_coo_native.h>
80#include <ATen/ops/_ctc_loss_backward_cpu_dispatch.h>
81#include <ATen/ops/_ctc_loss_backward_native.h>
82#include <ATen/ops/_ctc_loss_cpu_dispatch.h>
83#include <ATen/ops/_ctc_loss_native.h>
84#include <ATen/ops/_cummax_helper_cpu_dispatch.h>
85#include <ATen/ops/_cummax_helper_native.h>
86#include <ATen/ops/_cummin_helper_cpu_dispatch.h>
87#include <ATen/ops/_cummin_helper_native.h>
88#include <ATen/ops/_dirichlet_grad_cpu_dispatch.h>
89#include <ATen/ops/_dirichlet_grad_native.h>
90#include <ATen/ops/_efficientzerotensor_cpu_dispatch.h>
91#include <ATen/ops/_efficientzerotensor_native.h>
92#include <ATen/ops/_embedding_bag_cpu_dispatch.h>
93#include <ATen/ops/_embedding_bag_dense_backward_cpu_dispatch.h>
94#include <ATen/ops/_embedding_bag_dense_backward_native.h>
95#include <ATen/ops/_embedding_bag_forward_only_cpu_dispatch.h>
96#include <ATen/ops/_embedding_bag_forward_only_native.h>
97#include <ATen/ops/_embedding_bag_native.h>
98#include <ATen/ops/_embedding_bag_per_sample_weights_backward_cpu_dispatch.h>
99#include <ATen/ops/_embedding_bag_per_sample_weights_backward_native.h>
100#include <ATen/ops/_empty_affine_quantized_cpu_dispatch.h>
101#include <ATen/ops/_empty_affine_quantized_native.h>
102#include <ATen/ops/_empty_per_channel_affine_quantized_cpu_dispatch.h>
103#include <ATen/ops/_empty_per_channel_affine_quantized_native.h>
104#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_backward_cpu_dispatch.h>
105#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_backward_native.h>
106#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_cpu_dispatch.h>
107#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_native.h>
108#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_cpu_dispatch.h>
109#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_native.h>
110#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_cpu_dispatch.h>
111#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_native.h>
112#include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_cpu_dispatch.h>
113#include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_native.h>
114#include <ATen/ops/_fft_c2c_cpu_dispatch.h>
115#include <ATen/ops/_fft_c2c_native.h>
116#include <ATen/ops/_fft_c2r_cpu_dispatch.h>
117#include <ATen/ops/_fft_c2r_native.h>
118#include <ATen/ops/_fft_r2c_cpu_dispatch.h>
119#include <ATen/ops/_fft_r2c_native.h>
120#include <ATen/ops/_foobar_cpu_dispatch.h>
121#include <ATen/ops/_foobar_native.h>
122#include <ATen/ops/_foreach_abs_cpu_dispatch.h>
123#include <ATen/ops/_foreach_abs_native.h>
124#include <ATen/ops/_foreach_acos_cpu_dispatch.h>
125#include <ATen/ops/_foreach_acos_native.h>
126#include <ATen/ops/_foreach_add_cpu_dispatch.h>
127#include <ATen/ops/_foreach_add_native.h>
128#include <ATen/ops/_foreach_addcdiv_cpu_dispatch.h>
129#include <ATen/ops/_foreach_addcdiv_native.h>
130#include <ATen/ops/_foreach_addcmul_cpu_dispatch.h>
131#include <ATen/ops/_foreach_addcmul_native.h>
132#include <ATen/ops/_foreach_asin_cpu_dispatch.h>
133#include <ATen/ops/_foreach_asin_native.h>
134#include <ATen/ops/_foreach_atan_cpu_dispatch.h>
135#include <ATen/ops/_foreach_atan_native.h>
136#include <ATen/ops/_foreach_ceil_cpu_dispatch.h>
137#include <ATen/ops/_foreach_ceil_native.h>
138#include <ATen/ops/_foreach_clamp_max_cpu_dispatch.h>
139#include <ATen/ops/_foreach_clamp_max_native.h>
140#include <ATen/ops/_foreach_clamp_min_cpu_dispatch.h>
141#include <ATen/ops/_foreach_clamp_min_native.h>
142#include <ATen/ops/_foreach_cos_cpu_dispatch.h>
143#include <ATen/ops/_foreach_cos_native.h>
144#include <ATen/ops/_foreach_cosh_cpu_dispatch.h>
145#include <ATen/ops/_foreach_cosh_native.h>
146#include <ATen/ops/_foreach_div_cpu_dispatch.h>
147#include <ATen/ops/_foreach_div_native.h>
148#include <ATen/ops/_foreach_erf_cpu_dispatch.h>
149#include <ATen/ops/_foreach_erf_native.h>
150#include <ATen/ops/_foreach_erfc_cpu_dispatch.h>
151#include <ATen/ops/_foreach_erfc_native.h>
152#include <ATen/ops/_foreach_exp_cpu_dispatch.h>
153#include <ATen/ops/_foreach_exp_native.h>
154#include <ATen/ops/_foreach_expm1_cpu_dispatch.h>
155#include <ATen/ops/_foreach_expm1_native.h>
156#include <ATen/ops/_foreach_floor_cpu_dispatch.h>
157#include <ATen/ops/_foreach_floor_native.h>
158#include <ATen/ops/_foreach_frac_cpu_dispatch.h>
159#include <ATen/ops/_foreach_frac_native.h>
160#include <ATen/ops/_foreach_lerp_cpu_dispatch.h>
161#include <ATen/ops/_foreach_lerp_native.h>
162#include <ATen/ops/_foreach_lgamma_cpu_dispatch.h>
163#include <ATen/ops/_foreach_lgamma_native.h>
164#include <ATen/ops/_foreach_log10_cpu_dispatch.h>
165#include <ATen/ops/_foreach_log10_native.h>
166#include <ATen/ops/_foreach_log1p_cpu_dispatch.h>
167#include <ATen/ops/_foreach_log1p_native.h>
168#include <ATen/ops/_foreach_log2_cpu_dispatch.h>
169#include <ATen/ops/_foreach_log2_native.h>
170#include <ATen/ops/_foreach_log_cpu_dispatch.h>
171#include <ATen/ops/_foreach_log_native.h>
172#include <ATen/ops/_foreach_maximum_cpu_dispatch.h>
173#include <ATen/ops/_foreach_maximum_native.h>
174#include <ATen/ops/_foreach_minimum_cpu_dispatch.h>
175#include <ATen/ops/_foreach_minimum_native.h>
176#include <ATen/ops/_foreach_mul_cpu_dispatch.h>
177#include <ATen/ops/_foreach_mul_native.h>
178#include <ATen/ops/_foreach_neg_cpu_dispatch.h>
179#include <ATen/ops/_foreach_neg_native.h>
180#include <ATen/ops/_foreach_norm_cpu_dispatch.h>
181#include <ATen/ops/_foreach_norm_native.h>
182#include <ATen/ops/_foreach_reciprocal_cpu_dispatch.h>
183#include <ATen/ops/_foreach_reciprocal_native.h>
184#include <ATen/ops/_foreach_round_cpu_dispatch.h>
185#include <ATen/ops/_foreach_round_native.h>
186#include <ATen/ops/_foreach_sigmoid_cpu_dispatch.h>
187#include <ATen/ops/_foreach_sigmoid_native.h>
188#include <ATen/ops/_foreach_sin_cpu_dispatch.h>
189#include <ATen/ops/_foreach_sin_native.h>
190#include <ATen/ops/_foreach_sinh_cpu_dispatch.h>
191#include <ATen/ops/_foreach_sinh_native.h>
192#include <ATen/ops/_foreach_sqrt_cpu_dispatch.h>
193#include <ATen/ops/_foreach_sqrt_native.h>
194#include <ATen/ops/_foreach_sub_cpu_dispatch.h>
195#include <ATen/ops/_foreach_sub_native.h>
196#include <ATen/ops/_foreach_tan_cpu_dispatch.h>
197#include <ATen/ops/_foreach_tan_native.h>
198#include <ATen/ops/_foreach_tanh_cpu_dispatch.h>
199#include <ATen/ops/_foreach_tanh_native.h>
200#include <ATen/ops/_foreach_trunc_cpu_dispatch.h>
201#include <ATen/ops/_foreach_trunc_native.h>
202#include <ATen/ops/_foreach_zero_cpu_dispatch.h>
203#include <ATen/ops/_foreach_zero_native.h>
204#include <ATen/ops/_fused_moving_avg_obs_fq_helper_cpu_dispatch.h>
205#include <ATen/ops/_fused_moving_avg_obs_fq_helper_native.h>
206#include <ATen/ops/_fused_sdp_choice_cpu_dispatch.h>
207#include <ATen/ops/_fused_sdp_choice_native.h>
208#include <ATen/ops/_histogramdd_bin_edges_cpu_dispatch.h>
209#include <ATen/ops/_histogramdd_bin_edges_native.h>
210#include <ATen/ops/_histogramdd_from_bin_cts_cpu_dispatch.h>
211#include <ATen/ops/_histogramdd_from_bin_cts_native.h>
212#include <ATen/ops/_histogramdd_from_bin_tensors_cpu_dispatch.h>
213#include <ATen/ops/_histogramdd_from_bin_tensors_native.h>
214#include <ATen/ops/_index_put_impl_cpu_dispatch.h>
215#include <ATen/ops/_index_put_impl_native.h>
216#include <ATen/ops/_linalg_det_cpu_dispatch.h>
217#include <ATen/ops/_linalg_det_native.h>
218#include <ATen/ops/_linalg_eigh_cpu_dispatch.h>
219#include <ATen/ops/_linalg_eigh_native.h>
220#include <ATen/ops/_linalg_slogdet_cpu_dispatch.h>
221#include <ATen/ops/_linalg_slogdet_native.h>
222#include <ATen/ops/_linalg_solve_ex_cpu_dispatch.h>
223#include <ATen/ops/_linalg_solve_ex_native.h>
224#include <ATen/ops/_linalg_svd_cpu_dispatch.h>
225#include <ATen/ops/_linalg_svd_native.h>
226#include <ATen/ops/_local_scalar_dense_cpu_dispatch.h>
227#include <ATen/ops/_local_scalar_dense_native.h>
228#include <ATen/ops/_log_softmax_backward_data_cpu_dispatch.h>
229#include <ATen/ops/_log_softmax_backward_data_native.h>
230#include <ATen/ops/_log_softmax_cpu_dispatch.h>
231#include <ATen/ops/_log_softmax_native.h>
232#include <ATen/ops/_logcumsumexp_cpu_dispatch.h>
233#include <ATen/ops/_logcumsumexp_native.h>
234#include <ATen/ops/_make_per_channel_quantized_tensor_cpu_dispatch.h>
235#include <ATen/ops/_make_per_channel_quantized_tensor_native.h>
236#include <ATen/ops/_make_per_tensor_quantized_tensor_cpu_dispatch.h>
237#include <ATen/ops/_make_per_tensor_quantized_tensor_native.h>
238#include <ATen/ops/_masked_softmax_backward_cpu_dispatch.h>
239#include <ATen/ops/_masked_softmax_backward_native.h>
240#include <ATen/ops/_masked_softmax_cpu_dispatch.h>
241#include <ATen/ops/_masked_softmax_native.h>
242#include <ATen/ops/_native_batch_norm_legit_cpu_dispatch.h>
243#include <ATen/ops/_native_batch_norm_legit_native.h>
244#include <ATen/ops/_native_decoder_only_multi_head_attention_cpu_dispatch.h>
245#include <ATen/ops/_native_decoder_only_multi_head_attention_native.h>
246#include <ATen/ops/_native_multi_head_attention_cpu_dispatch.h>
247#include <ATen/ops/_native_multi_head_attention_native.h>
248#include <ATen/ops/_nested_from_padded_cpu_dispatch.h>
249#include <ATen/ops/_nested_from_padded_native.h>
250#include <ATen/ops/_nested_tensor_from_mask_cpu_dispatch.h>
251#include <ATen/ops/_nested_tensor_from_mask_left_aligned_cpu_dispatch.h>
252#include <ATen/ops/_nested_tensor_from_mask_left_aligned_native.h>
253#include <ATen/ops/_nested_tensor_from_mask_native.h>
254#include <ATen/ops/_nested_view_from_buffer_cpu_dispatch.h>
255#include <ATen/ops/_nested_view_from_buffer_native.h>
256#include <ATen/ops/_pdist_backward_cpu_dispatch.h>
257#include <ATen/ops/_pdist_backward_native.h>
258#include <ATen/ops/_pdist_forward_cpu_dispatch.h>
259#include <ATen/ops/_pdist_forward_native.h>
260#include <ATen/ops/_prelu_kernel_backward_cpu_dispatch.h>
261#include <ATen/ops/_prelu_kernel_backward_native.h>
262#include <ATen/ops/_prelu_kernel_cpu_dispatch.h>
263#include <ATen/ops/_prelu_kernel_native.h>
264#include <ATen/ops/_reshape_alias_cpu_dispatch.h>
265#include <ATen/ops/_reshape_alias_native.h>
266#include <ATen/ops/_sample_dirichlet_cpu_dispatch.h>
267#include <ATen/ops/_sample_dirichlet_native.h>
268#include <ATen/ops/_segment_reduce_backward_cpu_dispatch.h>
269#include <ATen/ops/_segment_reduce_backward_native.h>
270#include <ATen/ops/_slow_conv2d_backward_cpu_dispatch.h>
271#include <ATen/ops/_slow_conv2d_backward_native.h>
272#include <ATen/ops/_slow_conv2d_forward_cpu_dispatch.h>
273#include <ATen/ops/_slow_conv2d_forward_native.h>
274#include <ATen/ops/_softmax_backward_data_cpu_dispatch.h>
275#include <ATen/ops/_softmax_backward_data_native.h>
276#include <ATen/ops/_softmax_cpu_dispatch.h>
277#include <ATen/ops/_softmax_native.h>
278#include <ATen/ops/_spdiags_cpu_dispatch.h>
279#include <ATen/ops/_spdiags_native.h>
280#include <ATen/ops/_stack_cpu_dispatch.h>
281#include <ATen/ops/_stack_native.h>
282#include <ATen/ops/_standard_gamma_cpu_dispatch.h>
283#include <ATen/ops/_standard_gamma_grad_cpu_dispatch.h>
284#include <ATen/ops/_standard_gamma_grad_native.h>
285#include <ATen/ops/_standard_gamma_native.h>
286#include <ATen/ops/_test_optional_filled_intlist_cpu_dispatch.h>
287#include <ATen/ops/_test_optional_filled_intlist_native.h>
288#include <ATen/ops/_test_optional_floatlist_cpu_dispatch.h>
289#include <ATen/ops/_test_optional_floatlist_native.h>
290#include <ATen/ops/_test_optional_intlist_cpu_dispatch.h>
291#include <ATen/ops/_test_optional_intlist_native.h>
292#include <ATen/ops/_transform_bias_rescale_qkv_cpu_dispatch.h>
293#include <ATen/ops/_transform_bias_rescale_qkv_native.h>
294#include <ATen/ops/_transformer_decoder_only_layer_fwd_cpu_dispatch.h>
295#include <ATen/ops/_transformer_decoder_only_layer_fwd_native.h>
296#include <ATen/ops/_transformer_encoder_layer_fwd_cpu_dispatch.h>
297#include <ATen/ops/_transformer_encoder_layer_fwd_native.h>
298#include <ATen/ops/_unique2_cpu_dispatch.h>
299#include <ATen/ops/_unique2_native.h>
300#include <ATen/ops/_unique_cpu_dispatch.h>
301#include <ATen/ops/_unique_native.h>
302#include <ATen/ops/_upsample_bicubic2d_aa_backward_cpu_dispatch.h>
303#include <ATen/ops/_upsample_bicubic2d_aa_backward_native.h>
304#include <ATen/ops/_upsample_bicubic2d_aa_cpu_dispatch.h>
305#include <ATen/ops/_upsample_bicubic2d_aa_native.h>
306#include <ATen/ops/_upsample_bilinear2d_aa_backward_cpu_dispatch.h>
307#include <ATen/ops/_upsample_bilinear2d_aa_backward_native.h>
308#include <ATen/ops/_upsample_bilinear2d_aa_cpu_dispatch.h>
309#include <ATen/ops/_upsample_bilinear2d_aa_native.h>
310#include <ATen/ops/_upsample_nearest_exact1d_backward_cpu_dispatch.h>
311#include <ATen/ops/_upsample_nearest_exact1d_backward_native.h>
312#include <ATen/ops/_upsample_nearest_exact1d_cpu_dispatch.h>
313#include <ATen/ops/_upsample_nearest_exact1d_native.h>
314#include <ATen/ops/_upsample_nearest_exact2d_backward_cpu_dispatch.h>
315#include <ATen/ops/_upsample_nearest_exact2d_backward_native.h>
316#include <ATen/ops/_upsample_nearest_exact2d_cpu_dispatch.h>
317#include <ATen/ops/_upsample_nearest_exact2d_native.h>
318#include <ATen/ops/_upsample_nearest_exact3d_backward_cpu_dispatch.h>
319#include <ATen/ops/_upsample_nearest_exact3d_backward_native.h>
320#include <ATen/ops/_upsample_nearest_exact3d_cpu_dispatch.h>
321#include <ATen/ops/_upsample_nearest_exact3d_native.h>
322#include <ATen/ops/_validate_compressed_sparse_indices_cpu_dispatch.h>
323#include <ATen/ops/_validate_compressed_sparse_indices_native.h>
324#include <ATen/ops/_weight_norm_interface_backward_cpu_dispatch.h>
325#include <ATen/ops/_weight_norm_interface_backward_native.h>
326#include <ATen/ops/_weight_norm_interface_cpu_dispatch.h>
327#include <ATen/ops/_weight_norm_interface_native.h>
328#include <ATen/ops/abs_cpu_dispatch.h>
329#include <ATen/ops/abs_native.h>
330#include <ATen/ops/acos_cpu_dispatch.h>
331#include <ATen/ops/acos_native.h>
332#include <ATen/ops/acosh_cpu_dispatch.h>
333#include <ATen/ops/acosh_native.h>
334#include <ATen/ops/adaptive_avg_pool2d_cpu_dispatch.h>
335#include <ATen/ops/adaptive_avg_pool2d_native.h>
336#include <ATen/ops/adaptive_avg_pool3d_backward_cpu_dispatch.h>
337#include <ATen/ops/adaptive_avg_pool3d_backward_native.h>
338#include <ATen/ops/adaptive_avg_pool3d_cpu_dispatch.h>
339#include <ATen/ops/adaptive_avg_pool3d_native.h>
340#include <ATen/ops/adaptive_max_pool2d_backward_cpu_dispatch.h>
341#include <ATen/ops/adaptive_max_pool2d_backward_native.h>
342#include <ATen/ops/adaptive_max_pool2d_cpu_dispatch.h>
343#include <ATen/ops/adaptive_max_pool2d_native.h>
344#include <ATen/ops/adaptive_max_pool3d_backward_cpu_dispatch.h>
345#include <ATen/ops/adaptive_max_pool3d_backward_native.h>
346#include <ATen/ops/adaptive_max_pool3d_cpu_dispatch.h>
347#include <ATen/ops/adaptive_max_pool3d_native.h>
348#include <ATen/ops/add_cpu_dispatch.h>
349#include <ATen/ops/add_native.h>
350#include <ATen/ops/addbmm_cpu_dispatch.h>
351#include <ATen/ops/addbmm_native.h>
352#include <ATen/ops/addcdiv_cpu_dispatch.h>
353#include <ATen/ops/addcdiv_native.h>
354#include <ATen/ops/addcmul_cpu_dispatch.h>
355#include <ATen/ops/addcmul_native.h>
356#include <ATen/ops/addmm_cpu_dispatch.h>
357#include <ATen/ops/addmm_native.h>
358#include <ATen/ops/addmv_cpu_dispatch.h>
359#include <ATen/ops/addmv_native.h>
360#include <ATen/ops/addr_cpu_dispatch.h>
361#include <ATen/ops/addr_native.h>
362#include <ATen/ops/all_cpu_dispatch.h>
363#include <ATen/ops/all_native.h>
364#include <ATen/ops/amax_cpu_dispatch.h>
365#include <ATen/ops/amax_native.h>
366#include <ATen/ops/amin_cpu_dispatch.h>
367#include <ATen/ops/amin_native.h>
368#include <ATen/ops/aminmax_cpu_dispatch.h>
369#include <ATen/ops/aminmax_native.h>
370#include <ATen/ops/angle_cpu_dispatch.h>
371#include <ATen/ops/angle_native.h>
372#include <ATen/ops/any_cpu_dispatch.h>
373#include <ATen/ops/any_native.h>
374#include <ATen/ops/arange_cpu_dispatch.h>
375#include <ATen/ops/arange_native.h>
376#include <ATen/ops/argmax_cpu_dispatch.h>
377#include <ATen/ops/argmax_native.h>
378#include <ATen/ops/argmin_cpu_dispatch.h>
379#include <ATen/ops/argmin_native.h>
380#include <ATen/ops/argsort_cpu_dispatch.h>
381#include <ATen/ops/argsort_native.h>
382#include <ATen/ops/as_strided_cpu_dispatch.h>
383#include <ATen/ops/as_strided_native.h>
384#include <ATen/ops/asin_cpu_dispatch.h>
385#include <ATen/ops/asin_native.h>
386#include <ATen/ops/asinh_cpu_dispatch.h>
387#include <ATen/ops/asinh_native.h>
388#include <ATen/ops/atan2_cpu_dispatch.h>
389#include <ATen/ops/atan2_native.h>
390#include <ATen/ops/atan_cpu_dispatch.h>
391#include <ATen/ops/atan_native.h>
392#include <ATen/ops/atanh_cpu_dispatch.h>
393#include <ATen/ops/atanh_native.h>
394#include <ATen/ops/avg_pool2d_backward_cpu_dispatch.h>
395#include <ATen/ops/avg_pool2d_backward_native.h>
396#include <ATen/ops/avg_pool2d_cpu_dispatch.h>
397#include <ATen/ops/avg_pool2d_native.h>
398#include <ATen/ops/avg_pool3d_backward_cpu_dispatch.h>
399#include <ATen/ops/avg_pool3d_backward_native.h>
400#include <ATen/ops/avg_pool3d_cpu_dispatch.h>
401#include <ATen/ops/avg_pool3d_native.h>
402#include <ATen/ops/baddbmm_cpu_dispatch.h>
403#include <ATen/ops/baddbmm_native.h>
404#include <ATen/ops/batch_norm_update_stats_cpu_dispatch.h>
405#include <ATen/ops/batch_norm_update_stats_native.h>
406#include <ATen/ops/bernoulli_cpu_dispatch.h>
407#include <ATen/ops/bernoulli_native.h>
408#include <ATen/ops/binary_cross_entropy_backward_cpu_dispatch.h>
409#include <ATen/ops/binary_cross_entropy_backward_native.h>
410#include <ATen/ops/binary_cross_entropy_cpu_dispatch.h>
411#include <ATen/ops/binary_cross_entropy_native.h>
412#include <ATen/ops/bincount_cpu_dispatch.h>
413#include <ATen/ops/bincount_native.h>
414#include <ATen/ops/binomial_cpu_dispatch.h>
415#include <ATen/ops/binomial_native.h>
416#include <ATen/ops/bitwise_and_cpu_dispatch.h>
417#include <ATen/ops/bitwise_and_native.h>
418#include <ATen/ops/bitwise_left_shift_cpu_dispatch.h>
419#include <ATen/ops/bitwise_left_shift_native.h>
420#include <ATen/ops/bitwise_not_cpu_dispatch.h>
421#include <ATen/ops/bitwise_not_native.h>
422#include <ATen/ops/bitwise_or_cpu_dispatch.h>
423#include <ATen/ops/bitwise_or_native.h>
424#include <ATen/ops/bitwise_right_shift_cpu_dispatch.h>
425#include <ATen/ops/bitwise_right_shift_native.h>
426#include <ATen/ops/bitwise_xor_cpu_dispatch.h>
427#include <ATen/ops/bitwise_xor_native.h>
428#include <ATen/ops/bmm_cpu_dispatch.h>
429#include <ATen/ops/bmm_native.h>
430#include <ATen/ops/bucketize_cpu_dispatch.h>
431#include <ATen/ops/bucketize_native.h>
432#include <ATen/ops/cat_cpu_dispatch.h>
433#include <ATen/ops/cat_native.h>
434#include <ATen/ops/cauchy_cpu_dispatch.h>
435#include <ATen/ops/cauchy_native.h>
436#include <ATen/ops/ceil_cpu_dispatch.h>
437#include <ATen/ops/ceil_native.h>
438#include <ATen/ops/channel_shuffle_cpu_dispatch.h>
439#include <ATen/ops/channel_shuffle_native.h>
440#include <ATen/ops/cholesky_cpu_dispatch.h>
441#include <ATen/ops/cholesky_inverse_cpu_dispatch.h>
442#include <ATen/ops/cholesky_inverse_native.h>
443#include <ATen/ops/cholesky_native.h>
444#include <ATen/ops/clamp_cpu_dispatch.h>
445#include <ATen/ops/clamp_max_cpu_dispatch.h>
446#include <ATen/ops/clamp_max_native.h>
447#include <ATen/ops/clamp_min_cpu_dispatch.h>
448#include <ATen/ops/clamp_min_native.h>
449#include <ATen/ops/clamp_native.h>
450#include <ATen/ops/col2im_cpu_dispatch.h>
451#include <ATen/ops/col2im_native.h>
452#include <ATen/ops/complex_cpu_dispatch.h>
453#include <ATen/ops/complex_native.h>
454#include <ATen/ops/conj_physical_cpu_dispatch.h>
455#include <ATen/ops/conj_physical_native.h>
456#include <ATen/ops/copysign_cpu_dispatch.h>
457#include <ATen/ops/copysign_native.h>
458#include <ATen/ops/cos_cpu_dispatch.h>
459#include <ATen/ops/cos_native.h>
460#include <ATen/ops/cosh_cpu_dispatch.h>
461#include <ATen/ops/cosh_native.h>
462#include <ATen/ops/count_nonzero_cpu_dispatch.h>
463#include <ATen/ops/count_nonzero_native.h>
464#include <ATen/ops/cumprod_cpu_dispatch.h>
465#include <ATen/ops/cumprod_native.h>
466#include <ATen/ops/cumsum_cpu_dispatch.h>
467#include <ATen/ops/cumsum_native.h>
468#include <ATen/ops/dense_dim_cpu_dispatch.h>
469#include <ATen/ops/dense_dim_native.h>
470#include <ATen/ops/dequantize_cpu_dispatch.h>
471#include <ATen/ops/dequantize_native.h>
472#include <ATen/ops/digamma_cpu_dispatch.h>
473#include <ATen/ops/digamma_native.h>
474#include <ATen/ops/div_cpu_dispatch.h>
475#include <ATen/ops/div_native.h>
476#include <ATen/ops/dot_cpu_dispatch.h>
477#include <ATen/ops/dot_native.h>
478#include <ATen/ops/elu_backward_cpu_dispatch.h>
479#include <ATen/ops/elu_backward_native.h>
480#include <ATen/ops/elu_cpu_dispatch.h>
481#include <ATen/ops/elu_native.h>
482#include <ATen/ops/embedding_dense_backward_cpu_dispatch.h>
483#include <ATen/ops/embedding_dense_backward_native.h>
484#include <ATen/ops/embedding_renorm_cpu_dispatch.h>
485#include <ATen/ops/embedding_renorm_native.h>
486#include <ATen/ops/empty_cpu_dispatch.h>
487#include <ATen/ops/empty_native.h>
488#include <ATen/ops/empty_strided_cpu_dispatch.h>
489#include <ATen/ops/empty_strided_native.h>
490#include <ATen/ops/eq_cpu_dispatch.h>
491#include <ATen/ops/eq_native.h>
492#include <ATen/ops/equal_cpu_dispatch.h>
493#include <ATen/ops/equal_native.h>
494#include <ATen/ops/erf_cpu_dispatch.h>
495#include <ATen/ops/erf_native.h>
496#include <ATen/ops/erfc_cpu_dispatch.h>
497#include <ATen/ops/erfc_native.h>
498#include <ATen/ops/erfinv_cpu_dispatch.h>
499#include <ATen/ops/erfinv_native.h>
500#include <ATen/ops/exp2_cpu_dispatch.h>
501#include <ATen/ops/exp2_native.h>
502#include <ATen/ops/exp_cpu_dispatch.h>
503#include <ATen/ops/exp_native.h>
504#include <ATen/ops/expm1_cpu_dispatch.h>
505#include <ATen/ops/expm1_native.h>
506#include <ATen/ops/exponential_cpu_dispatch.h>
507#include <ATen/ops/exponential_native.h>
508#include <ATen/ops/eye_cpu_dispatch.h>
509#include <ATen/ops/eye_native.h>
510#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_cpu_dispatch.h>
511#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_native.h>
512#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_cpu_dispatch.h>
513#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_native.h>
514#include <ATen/ops/fill_cpu_dispatch.h>
515#include <ATen/ops/fill_native.h>
516#include <ATen/ops/flip_cpu_dispatch.h>
517#include <ATen/ops/flip_native.h>
518#include <ATen/ops/floor_cpu_dispatch.h>
519#include <ATen/ops/floor_divide_cpu_dispatch.h>
520#include <ATen/ops/floor_divide_native.h>
521#include <ATen/ops/floor_native.h>
522#include <ATen/ops/fmax_cpu_dispatch.h>
523#include <ATen/ops/fmax_native.h>
524#include <ATen/ops/fmin_cpu_dispatch.h>
525#include <ATen/ops/fmin_native.h>
526#include <ATen/ops/fmod_cpu_dispatch.h>
527#include <ATen/ops/fmod_native.h>
528#include <ATen/ops/frac_cpu_dispatch.h>
529#include <ATen/ops/frac_native.h>
530#include <ATen/ops/fractional_max_pool2d_backward_cpu_dispatch.h>
531#include <ATen/ops/fractional_max_pool2d_backward_native.h>
532#include <ATen/ops/fractional_max_pool2d_cpu_dispatch.h>
533#include <ATen/ops/fractional_max_pool2d_native.h>
534#include <ATen/ops/fractional_max_pool3d_backward_cpu_dispatch.h>
535#include <ATen/ops/fractional_max_pool3d_backward_native.h>
536#include <ATen/ops/fractional_max_pool3d_cpu_dispatch.h>
537#include <ATen/ops/fractional_max_pool3d_native.h>
538#include <ATen/ops/frexp_cpu_dispatch.h>
539#include <ATen/ops/frexp_native.h>
540#include <ATen/ops/from_file_cpu_dispatch.h>
541#include <ATen/ops/from_file_native.h>
542#include <ATen/ops/gather_cpu_dispatch.h>
543#include <ATen/ops/gather_native.h>
544#include <ATen/ops/gcd_cpu_dispatch.h>
545#include <ATen/ops/gcd_native.h>
546#include <ATen/ops/ge_cpu_dispatch.h>
547#include <ATen/ops/ge_native.h>
548#include <ATen/ops/gelu_backward_cpu_dispatch.h>
549#include <ATen/ops/gelu_backward_native.h>
550#include <ATen/ops/gelu_cpu_dispatch.h>
551#include <ATen/ops/gelu_native.h>
552#include <ATen/ops/geometric_cpu_dispatch.h>
553#include <ATen/ops/geometric_native.h>
554#include <ATen/ops/geqrf_cpu_dispatch.h>
555#include <ATen/ops/geqrf_native.h>
556#include <ATen/ops/glu_backward_cpu_dispatch.h>
557#include <ATen/ops/glu_backward_jvp_cpu_dispatch.h>
558#include <ATen/ops/glu_backward_jvp_native.h>
559#include <ATen/ops/glu_backward_native.h>
560#include <ATen/ops/glu_cpu_dispatch.h>
561#include <ATen/ops/glu_jvp_cpu_dispatch.h>
562#include <ATen/ops/glu_jvp_native.h>
563#include <ATen/ops/glu_native.h>
564#include <ATen/ops/grid_sampler_2d_backward_cpu_dispatch.h>
565#include <ATen/ops/grid_sampler_2d_backward_native.h>
566#include <ATen/ops/grid_sampler_2d_cpu_dispatch.h>
567#include <ATen/ops/grid_sampler_2d_native.h>
568#include <ATen/ops/grid_sampler_3d_backward_cpu_dispatch.h>
569#include <ATen/ops/grid_sampler_3d_backward_native.h>
570#include <ATen/ops/grid_sampler_3d_cpu_dispatch.h>
571#include <ATen/ops/grid_sampler_3d_native.h>
572#include <ATen/ops/gt_cpu_dispatch.h>
573#include <ATen/ops/gt_native.h>
574#include <ATen/ops/hardshrink_backward_cpu_dispatch.h>
575#include <ATen/ops/hardshrink_backward_native.h>
576#include <ATen/ops/hardshrink_cpu_dispatch.h>
577#include <ATen/ops/hardshrink_native.h>
578#include <ATen/ops/hardsigmoid_backward_cpu_dispatch.h>
579#include <ATen/ops/hardsigmoid_backward_native.h>
580#include <ATen/ops/hardsigmoid_cpu_dispatch.h>
581#include <ATen/ops/hardsigmoid_native.h>
582#include <ATen/ops/hardswish_backward_cpu_dispatch.h>
583#include <ATen/ops/hardswish_backward_native.h>
584#include <ATen/ops/hardswish_cpu_dispatch.h>
585#include <ATen/ops/hardswish_native.h>
586#include <ATen/ops/hardtanh_backward_cpu_dispatch.h>
587#include <ATen/ops/hardtanh_backward_native.h>
588#include <ATen/ops/hardtanh_cpu_dispatch.h>
589#include <ATen/ops/hardtanh_native.h>
590#include <ATen/ops/heaviside_cpu_dispatch.h>
591#include <ATen/ops/heaviside_native.h>
592#include <ATen/ops/histc_cpu_dispatch.h>
593#include <ATen/ops/histc_native.h>
594#include <ATen/ops/histogram_cpu_dispatch.h>
595#include <ATen/ops/histogram_native.h>
596#include <ATen/ops/huber_loss_backward_cpu_dispatch.h>
597#include <ATen/ops/huber_loss_backward_native.h>
598#include <ATen/ops/huber_loss_cpu_dispatch.h>
599#include <ATen/ops/huber_loss_native.h>
600#include <ATen/ops/hypot_cpu_dispatch.h>
601#include <ATen/ops/hypot_native.h>
602#include <ATen/ops/i0_cpu_dispatch.h>
603#include <ATen/ops/i0_native.h>
604#include <ATen/ops/igamma_cpu_dispatch.h>
605#include <ATen/ops/igamma_native.h>
606#include <ATen/ops/igammac_cpu_dispatch.h>
607#include <ATen/ops/igammac_native.h>
608#include <ATen/ops/im2col_cpu_dispatch.h>
609#include <ATen/ops/im2col_native.h>
610#include <ATen/ops/index_add_cpu_dispatch.h>
611#include <ATen/ops/index_add_native.h>
612#include <ATen/ops/index_copy_cpu_dispatch.h>
613#include <ATen/ops/index_copy_native.h>
614#include <ATen/ops/index_cpu_dispatch.h>
615#include <ATen/ops/index_fill_cpu_dispatch.h>
616#include <ATen/ops/index_fill_native.h>
617#include <ATen/ops/index_native.h>
618#include <ATen/ops/index_reduce_cpu_dispatch.h>
619#include <ATen/ops/index_reduce_native.h>
620#include <ATen/ops/index_select_cpu_dispatch.h>
621#include <ATen/ops/index_select_native.h>
622#include <ATen/ops/is_set_to_cpu_dispatch.h>
623#include <ATen/ops/is_set_to_native.h>
624#include <ATen/ops/isin_cpu_dispatch.h>
625#include <ATen/ops/isin_native.h>
626#include <ATen/ops/isnan_cpu_dispatch.h>
627#include <ATen/ops/isnan_native.h>
628#include <ATen/ops/isneginf_cpu_dispatch.h>
629#include <ATen/ops/isneginf_native.h>
630#include <ATen/ops/isposinf_cpu_dispatch.h>
631#include <ATen/ops/isposinf_native.h>
632#include <ATen/ops/kthvalue_cpu_dispatch.h>
633#include <ATen/ops/kthvalue_native.h>
634#include <ATen/ops/lcm_cpu_dispatch.h>
635#include <ATen/ops/lcm_native.h>
636#include <ATen/ops/le_cpu_dispatch.h>
637#include <ATen/ops/le_native.h>
638#include <ATen/ops/leaky_relu_backward_cpu_dispatch.h>
639#include <ATen/ops/leaky_relu_backward_native.h>
640#include <ATen/ops/leaky_relu_cpu_dispatch.h>
641#include <ATen/ops/leaky_relu_native.h>
642#include <ATen/ops/lerp_cpu_dispatch.h>
643#include <ATen/ops/lerp_native.h>
644#include <ATen/ops/lgamma_cpu_dispatch.h>
645#include <ATen/ops/lgamma_native.h>
646#include <ATen/ops/linalg_cholesky_ex_cpu_dispatch.h>
647#include <ATen/ops/linalg_cholesky_ex_native.h>
648#include <ATen/ops/linalg_cross_cpu_dispatch.h>
649#include <ATen/ops/linalg_cross_native.h>
650#include <ATen/ops/linalg_eig_cpu_dispatch.h>
651#include <ATen/ops/linalg_eig_native.h>
652#include <ATen/ops/linalg_householder_product_cpu_dispatch.h>
653#include <ATen/ops/linalg_householder_product_native.h>
654#include <ATen/ops/linalg_inv_ex_cpu_dispatch.h>
655#include <ATen/ops/linalg_inv_ex_native.h>
656#include <ATen/ops/linalg_ldl_factor_ex_cpu_dispatch.h>
657#include <ATen/ops/linalg_ldl_factor_ex_native.h>
658#include <ATen/ops/linalg_ldl_solve_cpu_dispatch.h>
659#include <ATen/ops/linalg_ldl_solve_native.h>
660#include <ATen/ops/linalg_lstsq_cpu_dispatch.h>
661#include <ATen/ops/linalg_lstsq_native.h>
662#include <ATen/ops/linalg_lu_cpu_dispatch.h>
663#include <ATen/ops/linalg_lu_factor_ex_cpu_dispatch.h>
664#include <ATen/ops/linalg_lu_factor_ex_native.h>
665#include <ATen/ops/linalg_lu_native.h>
666#include <ATen/ops/linalg_lu_solve_cpu_dispatch.h>
667#include <ATen/ops/linalg_lu_solve_native.h>
668#include <ATen/ops/linalg_matrix_exp_cpu_dispatch.h>
669#include <ATen/ops/linalg_matrix_exp_native.h>
670#include <ATen/ops/linalg_qr_cpu_dispatch.h>
671#include <ATen/ops/linalg_qr_native.h>
672#include <ATen/ops/linalg_solve_triangular_cpu_dispatch.h>
673#include <ATen/ops/linalg_solve_triangular_native.h>
674#include <ATen/ops/linalg_vector_norm_cpu_dispatch.h>
675#include <ATen/ops/linalg_vector_norm_native.h>
676#include <ATen/ops/linspace_cpu_dispatch.h>
677#include <ATen/ops/linspace_native.h>
678#include <ATen/ops/log10_cpu_dispatch.h>
679#include <ATen/ops/log10_native.h>
680#include <ATen/ops/log1p_cpu_dispatch.h>
681#include <ATen/ops/log1p_native.h>
682#include <ATen/ops/log2_cpu_dispatch.h>
683#include <ATen/ops/log2_native.h>
684#include <ATen/ops/log_cpu_dispatch.h>
685#include <ATen/ops/log_native.h>
686#include <ATen/ops/log_normal_cpu_dispatch.h>
687#include <ATen/ops/log_normal_native.h>
688#include <ATen/ops/log_sigmoid_backward_cpu_dispatch.h>
689#include <ATen/ops/log_sigmoid_backward_native.h>
690#include <ATen/ops/log_sigmoid_forward_cpu_dispatch.h>
691#include <ATen/ops/log_sigmoid_forward_native.h>
692#include <ATen/ops/logaddexp2_cpu_dispatch.h>
693#include <ATen/ops/logaddexp2_native.h>
694#include <ATen/ops/logaddexp_cpu_dispatch.h>
695#include <ATen/ops/logaddexp_native.h>
696#include <ATen/ops/logical_and_cpu_dispatch.h>
697#include <ATen/ops/logical_and_native.h>
698#include <ATen/ops/logical_not_cpu_dispatch.h>
699#include <ATen/ops/logical_not_native.h>
700#include <ATen/ops/logical_or_cpu_dispatch.h>
701#include <ATen/ops/logical_or_native.h>
702#include <ATen/ops/logical_xor_cpu_dispatch.h>
703#include <ATen/ops/logical_xor_native.h>
704#include <ATen/ops/logit_backward_cpu_dispatch.h>
705#include <ATen/ops/logit_backward_native.h>
706#include <ATen/ops/logit_cpu_dispatch.h>
707#include <ATen/ops/logit_native.h>
708#include <ATen/ops/logspace_cpu_dispatch.h>
709#include <ATen/ops/logspace_native.h>
710#include <ATen/ops/lshift_cpu_dispatch.h>
711#include <ATen/ops/lshift_native.h>
712#include <ATen/ops/lt_cpu_dispatch.h>
713#include <ATen/ops/lt_native.h>
714#include <ATen/ops/lu_unpack_cpu_dispatch.h>
715#include <ATen/ops/lu_unpack_native.h>
716#include <ATen/ops/masked_fill_cpu_dispatch.h>
717#include <ATen/ops/masked_fill_native.h>
718#include <ATen/ops/masked_scatter_cpu_dispatch.h>
719#include <ATen/ops/masked_scatter_native.h>
720#include <ATen/ops/masked_select_cpu_dispatch.h>
721#include <ATen/ops/masked_select_native.h>
722#include <ATen/ops/max_cpu_dispatch.h>
723#include <ATen/ops/max_native.h>
724#include <ATen/ops/max_pool2d_with_indices_backward_cpu_dispatch.h>
725#include <ATen/ops/max_pool2d_with_indices_backward_native.h>
726#include <ATen/ops/max_pool2d_with_indices_cpu_dispatch.h>
727#include <ATen/ops/max_pool2d_with_indices_native.h>
728#include <ATen/ops/max_pool3d_with_indices_backward_cpu_dispatch.h>
729#include <ATen/ops/max_pool3d_with_indices_backward_native.h>
730#include <ATen/ops/max_pool3d_with_indices_cpu_dispatch.h>
731#include <ATen/ops/max_pool3d_with_indices_native.h>
732#include <ATen/ops/max_unpool2d_cpu_dispatch.h>
733#include <ATen/ops/max_unpool2d_native.h>
734#include <ATen/ops/max_unpool3d_cpu_dispatch.h>
735#include <ATen/ops/max_unpool3d_native.h>
736#include <ATen/ops/maximum_cpu_dispatch.h>
737#include <ATen/ops/maximum_native.h>
738#include <ATen/ops/mean_cpu_dispatch.h>
739#include <ATen/ops/mean_native.h>
740#include <ATen/ops/median_cpu_dispatch.h>
741#include <ATen/ops/median_native.h>
742#include <ATen/ops/min_cpu_dispatch.h>
743#include <ATen/ops/min_native.h>
744#include <ATen/ops/minimum_cpu_dispatch.h>
745#include <ATen/ops/minimum_native.h>
746#include <ATen/ops/mish_backward_cpu_dispatch.h>
747#include <ATen/ops/mish_backward_native.h>
748#include <ATen/ops/mish_cpu_dispatch.h>
749#include <ATen/ops/mish_native.h>
750#include <ATen/ops/mkldnn_rnn_layer_backward_cpu_dispatch.h>
751#include <ATen/ops/mkldnn_rnn_layer_backward_native.h>
752#include <ATen/ops/mkldnn_rnn_layer_cpu_dispatch.h>
753#include <ATen/ops/mkldnn_rnn_layer_native.h>
754#include <ATen/ops/mm_cpu_dispatch.h>
755#include <ATen/ops/mm_native.h>
756#include <ATen/ops/mode_cpu_dispatch.h>
757#include <ATen/ops/mode_native.h>
758#include <ATen/ops/mse_loss_backward_cpu_dispatch.h>
759#include <ATen/ops/mse_loss_backward_native.h>
760#include <ATen/ops/mse_loss_cpu_dispatch.h>
761#include <ATen/ops/mse_loss_native.h>
762#include <ATen/ops/mul_cpu_dispatch.h>
763#include <ATen/ops/mul_native.h>
764#include <ATen/ops/multi_margin_loss_backward_cpu_dispatch.h>
765#include <ATen/ops/multi_margin_loss_backward_native.h>
766#include <ATen/ops/multi_margin_loss_cpu_dispatch.h>
767#include <ATen/ops/multi_margin_loss_native.h>
768#include <ATen/ops/multilabel_margin_loss_backward_cpu_dispatch.h>
769#include <ATen/ops/multilabel_margin_loss_backward_native.h>
770#include <ATen/ops/multilabel_margin_loss_forward_cpu_dispatch.h>
771#include <ATen/ops/multilabel_margin_loss_forward_native.h>
772#include <ATen/ops/multinomial_cpu_dispatch.h>
773#include <ATen/ops/multinomial_native.h>
774#include <ATen/ops/mvlgamma_cpu_dispatch.h>
775#include <ATen/ops/mvlgamma_native.h>
776#include <ATen/ops/nan_to_num_cpu_dispatch.h>
777#include <ATen/ops/nan_to_num_native.h>
778#include <ATen/ops/nanmedian_cpu_dispatch.h>
779#include <ATen/ops/nanmedian_native.h>
780#include <ATen/ops/nansum_cpu_dispatch.h>
781#include <ATen/ops/nansum_native.h>
782#include <ATen/ops/narrow_copy_cpu_dispatch.h>
783#include <ATen/ops/narrow_copy_native.h>
784#include <ATen/ops/native_batch_norm_backward_cpu_dispatch.h>
785#include <ATen/ops/native_batch_norm_backward_native.h>
786#include <ATen/ops/native_batch_norm_cpu_dispatch.h>
787#include <ATen/ops/native_batch_norm_native.h>
788#include <ATen/ops/native_channel_shuffle_cpu_dispatch.h>
789#include <ATen/ops/native_channel_shuffle_native.h>
790#include <ATen/ops/native_dropout_backward_cpu_dispatch.h>
791#include <ATen/ops/native_dropout_backward_native.h>
792#include <ATen/ops/native_dropout_cpu_dispatch.h>
793#include <ATen/ops/native_dropout_native.h>
794#include <ATen/ops/native_group_norm_backward_cpu_dispatch.h>
795#include <ATen/ops/native_group_norm_backward_native.h>
796#include <ATen/ops/native_group_norm_cpu_dispatch.h>
797#include <ATen/ops/native_group_norm_native.h>
798#include <ATen/ops/native_layer_norm_backward_cpu_dispatch.h>
799#include <ATen/ops/native_layer_norm_backward_native.h>
800#include <ATen/ops/native_layer_norm_cpu_dispatch.h>
801#include <ATen/ops/native_layer_norm_native.h>
802#include <ATen/ops/ne_cpu_dispatch.h>
803#include <ATen/ops/ne_native.h>
804#include <ATen/ops/neg_cpu_dispatch.h>
805#include <ATen/ops/neg_native.h>
806#include <ATen/ops/nextafter_cpu_dispatch.h>
807#include <ATen/ops/nextafter_native.h>
808#include <ATen/ops/nll_loss2d_backward_cpu_dispatch.h>
809#include <ATen/ops/nll_loss2d_backward_native.h>
810#include <ATen/ops/nll_loss2d_forward_cpu_dispatch.h>
811#include <ATen/ops/nll_loss2d_forward_native.h>
812#include <ATen/ops/nll_loss_backward_cpu_dispatch.h>
813#include <ATen/ops/nll_loss_backward_native.h>
814#include <ATen/ops/nll_loss_forward_cpu_dispatch.h>
815#include <ATen/ops/nll_loss_forward_native.h>
816#include <ATen/ops/nonzero_cpu_dispatch.h>
817#include <ATen/ops/nonzero_native.h>
818#include <ATen/ops/norm_cpu_dispatch.h>
819#include <ATen/ops/norm_native.h>
820#include <ATen/ops/normal_cpu_dispatch.h>
821#include <ATen/ops/normal_native.h>
822#include <ATen/ops/ormqr_cpu_dispatch.h>
823#include <ATen/ops/ormqr_native.h>
824#include <ATen/ops/pixel_shuffle_cpu_dispatch.h>
825#include <ATen/ops/pixel_shuffle_native.h>
826#include <ATen/ops/pixel_unshuffle_cpu_dispatch.h>
827#include <ATen/ops/pixel_unshuffle_native.h>
828#include <ATen/ops/poisson_cpu_dispatch.h>
829#include <ATen/ops/poisson_native.h>
830#include <ATen/ops/polar_cpu_dispatch.h>
831#include <ATen/ops/polar_native.h>
832#include <ATen/ops/polygamma_cpu_dispatch.h>
833#include <ATen/ops/polygamma_native.h>
834#include <ATen/ops/pow_cpu_dispatch.h>
835#include <ATen/ops/pow_native.h>
836#include <ATen/ops/prod_cpu_dispatch.h>
837#include <ATen/ops/prod_native.h>
838#include <ATen/ops/put_cpu_dispatch.h>
839#include <ATen/ops/put_native.h>
840#include <ATen/ops/quantize_per_channel_cpu_dispatch.h>
841#include <ATen/ops/quantize_per_channel_native.h>
842#include <ATen/ops/quantize_per_tensor_cpu_dispatch.h>
843#include <ATen/ops/quantize_per_tensor_dynamic_cpu_dispatch.h>
844#include <ATen/ops/quantize_per_tensor_dynamic_native.h>
845#include <ATen/ops/quantize_per_tensor_native.h>
846#include <ATen/ops/random_cpu_dispatch.h>
847#include <ATen/ops/random_native.h>
848#include <ATen/ops/randperm_cpu_dispatch.h>
849#include <ATen/ops/randperm_native.h>
850#include <ATen/ops/range_cpu_dispatch.h>
851#include <ATen/ops/range_native.h>
852#include <ATen/ops/reciprocal_cpu_dispatch.h>
853#include <ATen/ops/reciprocal_native.h>
854#include <ATen/ops/reflection_pad1d_backward_cpu_dispatch.h>
855#include <ATen/ops/reflection_pad1d_backward_native.h>
856#include <ATen/ops/reflection_pad1d_cpu_dispatch.h>
857#include <ATen/ops/reflection_pad1d_native.h>
858#include <ATen/ops/reflection_pad2d_backward_cpu_dispatch.h>
859#include <ATen/ops/reflection_pad2d_backward_native.h>
860#include <ATen/ops/reflection_pad2d_cpu_dispatch.h>
861#include <ATen/ops/reflection_pad2d_native.h>
862#include <ATen/ops/reflection_pad3d_backward_cpu_dispatch.h>
863#include <ATen/ops/reflection_pad3d_backward_native.h>
864#include <ATen/ops/reflection_pad3d_cpu_dispatch.h>
865#include <ATen/ops/reflection_pad3d_native.h>
866#include <ATen/ops/relu_cpu_dispatch.h>
867#include <ATen/ops/relu_native.h>
868#include <ATen/ops/remainder_cpu_dispatch.h>
869#include <ATen/ops/remainder_native.h>
870#include <ATen/ops/renorm_cpu_dispatch.h>
871#include <ATen/ops/renorm_native.h>
872#include <ATen/ops/repeat_interleave_cpu_dispatch.h>
873#include <ATen/ops/repeat_interleave_native.h>
874#include <ATen/ops/replication_pad1d_backward_cpu_dispatch.h>
875#include <ATen/ops/replication_pad1d_backward_native.h>
876#include <ATen/ops/replication_pad1d_cpu_dispatch.h>
877#include <ATen/ops/replication_pad1d_native.h>
878#include <ATen/ops/replication_pad2d_backward_cpu_dispatch.h>
879#include <ATen/ops/replication_pad2d_backward_native.h>
880#include <ATen/ops/replication_pad2d_cpu_dispatch.h>
881#include <ATen/ops/replication_pad2d_native.h>
882#include <ATen/ops/replication_pad3d_backward_cpu_dispatch.h>
883#include <ATen/ops/replication_pad3d_backward_native.h>
884#include <ATen/ops/replication_pad3d_cpu_dispatch.h>
885#include <ATen/ops/replication_pad3d_native.h>
886#include <ATen/ops/resize_cpu_dispatch.h>
887#include <ATen/ops/resize_native.h>
888#include <ATen/ops/roll_cpu_dispatch.h>
889#include <ATen/ops/roll_native.h>
890#include <ATen/ops/round_cpu_dispatch.h>
891#include <ATen/ops/round_native.h>
892#include <ATen/ops/rrelu_with_noise_cpu_dispatch.h>
893#include <ATen/ops/rrelu_with_noise_native.h>
894#include <ATen/ops/rshift_cpu_dispatch.h>
895#include <ATen/ops/rshift_native.h>
896#include <ATen/ops/rsqrt_cpu_dispatch.h>
897#include <ATen/ops/rsqrt_native.h>
898#include <ATen/ops/rsub_cpu_dispatch.h>
899#include <ATen/ops/rsub_native.h>
900#include <ATen/ops/scatter_add_cpu_dispatch.h>
901#include <ATen/ops/scatter_add_native.h>
902#include <ATen/ops/scatter_cpu_dispatch.h>
903#include <ATen/ops/scatter_native.h>
904#include <ATen/ops/scatter_reduce_cpu_dispatch.h>
905#include <ATen/ops/scatter_reduce_native.h>
906#include <ATen/ops/searchsorted_cpu_dispatch.h>
907#include <ATen/ops/searchsorted_native.h>
908#include <ATen/ops/segment_reduce_cpu_dispatch.h>
909#include <ATen/ops/segment_reduce_native.h>
910#include <ATen/ops/set_cpu_dispatch.h>
911#include <ATen/ops/set_native.h>
912#include <ATen/ops/sgn_cpu_dispatch.h>
913#include <ATen/ops/sgn_native.h>
914#include <ATen/ops/sigmoid_backward_cpu_dispatch.h>
915#include <ATen/ops/sigmoid_backward_native.h>
916#include <ATen/ops/sigmoid_cpu_dispatch.h>
917#include <ATen/ops/sigmoid_native.h>
918#include <ATen/ops/sign_cpu_dispatch.h>
919#include <ATen/ops/sign_native.h>
920#include <ATen/ops/signbit_cpu_dispatch.h>
921#include <ATen/ops/signbit_native.h>
922#include <ATen/ops/silu_backward_cpu_dispatch.h>
923#include <ATen/ops/silu_backward_native.h>
924#include <ATen/ops/silu_cpu_dispatch.h>
925#include <ATen/ops/silu_native.h>
926#include <ATen/ops/sin_cpu_dispatch.h>
927#include <ATen/ops/sin_native.h>
928#include <ATen/ops/sinc_cpu_dispatch.h>
929#include <ATen/ops/sinc_native.h>
930#include <ATen/ops/sinh_cpu_dispatch.h>
931#include <ATen/ops/sinh_native.h>
932#include <ATen/ops/slow_conv3d_forward_cpu_dispatch.h>
933#include <ATen/ops/slow_conv3d_forward_native.h>
934#include <ATen/ops/slow_conv_dilated2d_cpu_dispatch.h>
935#include <ATen/ops/slow_conv_dilated2d_native.h>
936#include <ATen/ops/slow_conv_dilated3d_cpu_dispatch.h>
937#include <ATen/ops/slow_conv_dilated3d_native.h>
938#include <ATen/ops/slow_conv_transpose2d_cpu_dispatch.h>
939#include <ATen/ops/slow_conv_transpose2d_native.h>
940#include <ATen/ops/slow_conv_transpose3d_cpu_dispatch.h>
941#include <ATen/ops/slow_conv_transpose3d_native.h>
942#include <ATen/ops/smooth_l1_loss_backward_cpu_dispatch.h>
943#include <ATen/ops/smooth_l1_loss_backward_native.h>
944#include <ATen/ops/smooth_l1_loss_cpu_dispatch.h>
945#include <ATen/ops/smooth_l1_loss_native.h>
946#include <ATen/ops/softplus_backward_cpu_dispatch.h>
947#include <ATen/ops/softplus_backward_native.h>
948#include <ATen/ops/softplus_cpu_dispatch.h>
949#include <ATen/ops/softplus_native.h>
950#include <ATen/ops/softshrink_backward_cpu_dispatch.h>
951#include <ATen/ops/softshrink_backward_native.h>
952#include <ATen/ops/softshrink_cpu_dispatch.h>
953#include <ATen/ops/softshrink_native.h>
954#include <ATen/ops/sort_cpu_dispatch.h>
955#include <ATen/ops/sort_native.h>
956#include <ATen/ops/sparse_dim_cpu_dispatch.h>
957#include <ATen/ops/sparse_dim_native.h>
958#include <ATen/ops/special_airy_ai_cpu_dispatch.h>
959#include <ATen/ops/special_airy_ai_native.h>
960#include <ATen/ops/special_bessel_j0_cpu_dispatch.h>
961#include <ATen/ops/special_bessel_j0_native.h>
962#include <ATen/ops/special_bessel_j1_cpu_dispatch.h>
963#include <ATen/ops/special_bessel_j1_native.h>
964#include <ATen/ops/special_bessel_y0_cpu_dispatch.h>
965#include <ATen/ops/special_bessel_y0_native.h>
966#include <ATen/ops/special_bessel_y1_cpu_dispatch.h>
967#include <ATen/ops/special_bessel_y1_native.h>
968#include <ATen/ops/special_chebyshev_polynomial_t_cpu_dispatch.h>
969#include <ATen/ops/special_chebyshev_polynomial_t_native.h>
970#include <ATen/ops/special_chebyshev_polynomial_u_cpu_dispatch.h>
971#include <ATen/ops/special_chebyshev_polynomial_u_native.h>
972#include <ATen/ops/special_chebyshev_polynomial_v_cpu_dispatch.h>
973#include <ATen/ops/special_chebyshev_polynomial_v_native.h>
974#include <ATen/ops/special_chebyshev_polynomial_w_cpu_dispatch.h>
975#include <ATen/ops/special_chebyshev_polynomial_w_native.h>
976#include <ATen/ops/special_entr_cpu_dispatch.h>
977#include <ATen/ops/special_entr_native.h>
978#include <ATen/ops/special_erfcx_cpu_dispatch.h>
979#include <ATen/ops/special_erfcx_native.h>
980#include <ATen/ops/special_hermite_polynomial_h_cpu_dispatch.h>
981#include <ATen/ops/special_hermite_polynomial_h_native.h>
982#include <ATen/ops/special_hermite_polynomial_he_cpu_dispatch.h>
983#include <ATen/ops/special_hermite_polynomial_he_native.h>
984#include <ATen/ops/special_i0e_cpu_dispatch.h>
985#include <ATen/ops/special_i0e_native.h>
986#include <ATen/ops/special_i1_cpu_dispatch.h>
987#include <ATen/ops/special_i1_native.h>
988#include <ATen/ops/special_i1e_cpu_dispatch.h>
989#include <ATen/ops/special_i1e_native.h>
990#include <ATen/ops/special_laguerre_polynomial_l_cpu_dispatch.h>
991#include <ATen/ops/special_laguerre_polynomial_l_native.h>
992#include <ATen/ops/special_legendre_polynomial_p_cpu_dispatch.h>
993#include <ATen/ops/special_legendre_polynomial_p_native.h>
994#include <ATen/ops/special_log_ndtr_cpu_dispatch.h>
995#include <ATen/ops/special_log_ndtr_native.h>
996#include <ATen/ops/special_modified_bessel_i0_cpu_dispatch.h>
997#include <ATen/ops/special_modified_bessel_i0_native.h>
998#include <ATen/ops/special_modified_bessel_i1_cpu_dispatch.h>
999#include <ATen/ops/special_modified_bessel_i1_native.h>
1000#include <ATen/ops/special_modified_bessel_k0_cpu_dispatch.h>
1001#include <ATen/ops/special_modified_bessel_k0_native.h>
1002#include <ATen/ops/special_modified_bessel_k1_cpu_dispatch.h>
1003#include <ATen/ops/special_modified_bessel_k1_native.h>
1004#include <ATen/ops/special_ndtri_cpu_dispatch.h>
1005#include <ATen/ops/special_ndtri_native.h>
1006#include <ATen/ops/special_scaled_modified_bessel_k0_cpu_dispatch.h>
1007#include <ATen/ops/special_scaled_modified_bessel_k0_native.h>
1008#include <ATen/ops/special_scaled_modified_bessel_k1_cpu_dispatch.h>
1009#include <ATen/ops/special_scaled_modified_bessel_k1_native.h>
1010#include <ATen/ops/special_shifted_chebyshev_polynomial_t_cpu_dispatch.h>
1011#include <ATen/ops/special_shifted_chebyshev_polynomial_t_native.h>
1012#include <ATen/ops/special_shifted_chebyshev_polynomial_u_cpu_dispatch.h>
1013#include <ATen/ops/special_shifted_chebyshev_polynomial_u_native.h>
1014#include <ATen/ops/special_shifted_chebyshev_polynomial_v_cpu_dispatch.h>
1015#include <ATen/ops/special_shifted_chebyshev_polynomial_v_native.h>
1016#include <ATen/ops/special_shifted_chebyshev_polynomial_w_cpu_dispatch.h>
1017#include <ATen/ops/special_shifted_chebyshev_polynomial_w_native.h>
1018#include <ATen/ops/special_spherical_bessel_j0_cpu_dispatch.h>
1019#include <ATen/ops/special_spherical_bessel_j0_native.h>
1020#include <ATen/ops/special_xlog1py_cpu_dispatch.h>
1021#include <ATen/ops/special_xlog1py_native.h>
1022#include <ATen/ops/special_zeta_cpu_dispatch.h>
1023#include <ATen/ops/special_zeta_native.h>
1024#include <ATen/ops/sqrt_cpu_dispatch.h>
1025#include <ATen/ops/sqrt_native.h>
1026#include <ATen/ops/sspaddmm_cpu_dispatch.h>
1027#include <ATen/ops/sspaddmm_native.h>
1028#include <ATen/ops/std_cpu_dispatch.h>
1029#include <ATen/ops/std_mean_cpu_dispatch.h>
1030#include <ATen/ops/std_mean_native.h>
1031#include <ATen/ops/std_native.h>
1032#include <ATen/ops/sub_cpu_dispatch.h>
1033#include <ATen/ops/sub_native.h>
1034#include <ATen/ops/sum_cpu_dispatch.h>
1035#include <ATen/ops/sum_native.h>
1036#include <ATen/ops/take_cpu_dispatch.h>
1037#include <ATen/ops/take_native.h>
1038#include <ATen/ops/tan_cpu_dispatch.h>
1039#include <ATen/ops/tan_native.h>
1040#include <ATen/ops/tanh_backward_cpu_dispatch.h>
1041#include <ATen/ops/tanh_backward_native.h>
1042#include <ATen/ops/tanh_cpu_dispatch.h>
1043#include <ATen/ops/tanh_native.h>
1044#include <ATen/ops/tensordot_cpu_dispatch.h>
1045#include <ATen/ops/tensordot_native.h>
1046#include <ATen/ops/threshold_backward_cpu_dispatch.h>
1047#include <ATen/ops/threshold_backward_native.h>
1048#include <ATen/ops/threshold_cpu_dispatch.h>
1049#include <ATen/ops/threshold_native.h>
1050#include <ATen/ops/to_mkldnn_cpu_dispatch.h>
1051#include <ATen/ops/to_mkldnn_native.h>
1052#include <ATen/ops/to_sparse_bsc_cpu_dispatch.h>
1053#include <ATen/ops/to_sparse_bsc_native.h>
1054#include <ATen/ops/to_sparse_bsr_cpu_dispatch.h>
1055#include <ATen/ops/to_sparse_bsr_native.h>
1056#include <ATen/ops/to_sparse_cpu_dispatch.h>
1057#include <ATen/ops/to_sparse_csc_cpu_dispatch.h>
1058#include <ATen/ops/to_sparse_csc_native.h>
1059#include <ATen/ops/to_sparse_csr_cpu_dispatch.h>
1060#include <ATen/ops/to_sparse_csr_native.h>
1061#include <ATen/ops/to_sparse_native.h>
1062#include <ATen/ops/topk_cpu_dispatch.h>
1063#include <ATen/ops/topk_native.h>
1064#include <ATen/ops/trace_cpu_dispatch.h>
1065#include <ATen/ops/trace_native.h>
1066#include <ATen/ops/triangular_solve_cpu_dispatch.h>
1067#include <ATen/ops/triangular_solve_native.h>
1068#include <ATen/ops/tril_cpu_dispatch.h>
1069#include <ATen/ops/tril_indices_cpu_dispatch.h>
1070#include <ATen/ops/tril_indices_native.h>
1071#include <ATen/ops/tril_native.h>
1072#include <ATen/ops/triu_cpu_dispatch.h>
1073#include <ATen/ops/triu_indices_cpu_dispatch.h>
1074#include <ATen/ops/triu_indices_native.h>
1075#include <ATen/ops/triu_native.h>
1076#include <ATen/ops/trunc_cpu_dispatch.h>
1077#include <ATen/ops/trunc_native.h>
1078#include <ATen/ops/unfold_backward_cpu_dispatch.h>
1079#include <ATen/ops/unfold_backward_native.h>
1080#include <ATen/ops/unfold_cpu_dispatch.h>
1081#include <ATen/ops/unfold_native.h>
1082#include <ATen/ops/uniform_cpu_dispatch.h>
1083#include <ATen/ops/uniform_native.h>
1084#include <ATen/ops/unique_consecutive_cpu_dispatch.h>
1085#include <ATen/ops/unique_consecutive_native.h>
1086#include <ATen/ops/unique_dim_consecutive_cpu_dispatch.h>
1087#include <ATen/ops/unique_dim_consecutive_native.h>
1088#include <ATen/ops/unique_dim_cpu_dispatch.h>
1089#include <ATen/ops/unique_dim_native.h>
1090#include <ATen/ops/upsample_bicubic2d_backward_cpu_dispatch.h>
1091#include <ATen/ops/upsample_bicubic2d_backward_native.h>
1092#include <ATen/ops/upsample_bicubic2d_cpu_dispatch.h>
1093#include <ATen/ops/upsample_bicubic2d_native.h>
1094#include <ATen/ops/upsample_bilinear2d_backward_cpu_dispatch.h>
1095#include <ATen/ops/upsample_bilinear2d_backward_native.h>
1096#include <ATen/ops/upsample_bilinear2d_cpu_dispatch.h>
1097#include <ATen/ops/upsample_bilinear2d_native.h>
1098#include <ATen/ops/upsample_linear1d_backward_cpu_dispatch.h>
1099#include <ATen/ops/upsample_linear1d_backward_native.h>
1100#include <ATen/ops/upsample_linear1d_cpu_dispatch.h>
1101#include <ATen/ops/upsample_linear1d_native.h>
1102#include <ATen/ops/upsample_nearest1d_backward_cpu_dispatch.h>
1103#include <ATen/ops/upsample_nearest1d_backward_native.h>
1104#include <ATen/ops/upsample_nearest1d_cpu_dispatch.h>
1105#include <ATen/ops/upsample_nearest1d_native.h>
1106#include <ATen/ops/upsample_nearest2d_backward_cpu_dispatch.h>
1107#include <ATen/ops/upsample_nearest2d_backward_native.h>
1108#include <ATen/ops/upsample_nearest2d_cpu_dispatch.h>
1109#include <ATen/ops/upsample_nearest2d_native.h>
1110#include <ATen/ops/upsample_nearest3d_backward_cpu_dispatch.h>
1111#include <ATen/ops/upsample_nearest3d_backward_native.h>
1112#include <ATen/ops/upsample_nearest3d_cpu_dispatch.h>
1113#include <ATen/ops/upsample_nearest3d_native.h>
1114#include <ATen/ops/upsample_trilinear3d_backward_cpu_dispatch.h>
1115#include <ATen/ops/upsample_trilinear3d_backward_native.h>
1116#include <ATen/ops/upsample_trilinear3d_cpu_dispatch.h>
1117#include <ATen/ops/upsample_trilinear3d_native.h>
1118#include <ATen/ops/var_cpu_dispatch.h>
1119#include <ATen/ops/var_mean_cpu_dispatch.h>
1120#include <ATen/ops/var_mean_native.h>
1121#include <ATen/ops/var_native.h>
1122#include <ATen/ops/vdot_cpu_dispatch.h>
1123#include <ATen/ops/vdot_native.h>
1124#include <ATen/ops/view_as_complex_cpu_dispatch.h>
1125#include <ATen/ops/view_as_complex_native.h>
1126#include <ATen/ops/view_as_real_cpu_dispatch.h>
1127#include <ATen/ops/view_as_real_native.h>
1128#include <ATen/ops/view_cpu_dispatch.h>
1129#include <ATen/ops/view_native.h>
1130#include <ATen/ops/where_cpu_dispatch.h>
1131#include <ATen/ops/where_native.h>
1132#include <ATen/ops/xlogy_cpu_dispatch.h>
1133#include <ATen/ops/xlogy_native.h>
1134#include <ATen/ops/zero_cpu_dispatch.h>
1135#include <ATen/ops/zero_native.h>
1136
1137// See template file RegisterDispatchDefinitions.ini
1138namespace at {
1139// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
1140// ambiguity with conflicting identifiers that may have been defined in
1141// at namespace already.
1142namespace {
1143Tensor create_out(IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {
1144 if (strides.empty()) {
1145 return at::detail::empty_cpu(sizes, options);
1146 } else {
1147 return at::detail::empty_strided_cpu(sizes, strides, options);
1148 }
1149}
1150void resize_out(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {
1151 TORCH_CHECK(options.dtype() == out.dtype(),
1152 "Expected out tensor to have dtype ", options.dtype(), ", but got ", out.dtype(), " instead");
1153 TORCH_CHECK(options.device() == out.device(),
1154 "Expected out tensor to have device ", options.device(), ", but got ", out.device(), " instead");
1155 const bool resized = at::native::resize_output(out, sizes);
1156 // Only restride if a resize occurred; otherwise we ignore the (advisory)
1157 // strides from the meta function and directly use the output tensor's
1158 // preexisting strides
1159 if (resized) {
1160 if (!strides.empty()) {
1161 TORCH_INTERNAL_ASSERT(!options.memory_format_opt().has_value());
1162 // TODO: avoid the redispatch here
1163 out.as_strided_(sizes, strides);
1164 } else if (options.memory_format_opt().has_value()) {
1165 out.unsafeGetTensorImpl()->empty_tensor_restride(*options.memory_format_opt());
1166 }
1167 }
1168}
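// Validates that the tensor driving an in-place call already has the dtype,
// device, and sizes the meta function computed; in-place variants cannot
// reallocate `self`, so any mismatch is a hard error.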
1169void check_inplace(const Tensor &self, IntArrayRef sizes, const TensorOptions &options) {
  // These checks are needed for operators that either:
  //   1) don't use 'TensorIterator' (e.g. 'addmm' and 'baddbmm'), or
  //   2) have particular typing rules (e.g. 'cumsum' and 'cumprod').
  // For other operators (e.g. 'add'), 'TensorIterator' already performs
  // these checks itself.
1175 TORCH_CHECK(options.dtype() == self.dtype(),
1176 "Bad in-place call: ",
1177 "input tensor dtype ", self.dtype(), " and output tensor dtype ", options.dtype(), " should match");
1178 TORCH_CHECK(options.device() == self.device(),
1179 "Bad in-place call: ",
1180 "input tensor device ", self.device(), " and output tensor device ", options.device(), " should match");
1181 TORCH_CHECK(sizes == self.sizes(),
1182 "Bad in-place call: ",
1183 "input tensor size ", self.sizes(), " and output tensor size ", sizes, " should match");
1184}
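// If the user-supplied `out=` tensor does not already have the strides the
// kernel expects, allocate a freshly strided "proxy" tensor for the kernel to
// write into; the calling wrapper copies the proxy back into the user's
// tensor afterwards.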
1185c10::optional<Tensor> maybe_create_proxy(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {
1186 if (out.strides() != strides) {
1187 return at::detail::empty_strided_cpu(sizes, strides, options);
1188 }
1189 return c10::nullopt;
1190}
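// Editorial sketch, not part of the generated registration: a minimal
// illustration of how the helpers above cooperate for an `out=` kernel,
// assuming `sketch_out_variant_flow` (a hypothetical name) is handed the
// sizes/strides/options produced by a meta function. It is never called.
[[maybe_unused]] static void sketch_out_variant_flow(
    Tensor& out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions& options) {
  // Resize/validate the user's tensor against the meta-computed metadata.
  resize_out(out, sizes, strides, options);
  // Fall back to a proxy tensor if `out` could not take the requested strides.
  auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
  Tensor& kernel_target = maybe_proxy.has_value() ? *maybe_proxy : out;
  (void)kernel_target;  // the structured impl() would write into this tensor
  // Materialize the result back into the tensor the caller handed us.
  if (maybe_proxy.has_value()) {
    out.copy_(*maybe_proxy);
  }
}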
1191namespace {
1192void wrapper_CPU___assert_async(const at::Tensor & self) {
1193 // No device check
1194 // DeviceGuard omitted
1195 return at::native::_assert_async_cpu(self);
1196}
1197} // anonymous namespace
1198namespace {
1199::std::tuple<at::Tensor,at::Tensor> wrapper_CPU__native_dropout(const at::Tensor & input, double p, c10::optional<bool> train) {
1200 // No device check
1201 // DeviceGuard omitted
1202 return at::native::native_dropout_cpu(input, p, train);
1203}
1204} // anonymous namespace
1205namespace {
1206at::Tensor wrapper_CPU__native_dropout_backward(const at::Tensor & grad_output, const at::Tensor & mask, double scale) {
1207 // No device check
1208 // DeviceGuard omitted
1209 return at::native::native_dropout_backward(grad_output, mask, scale);
1210}
1211} // anonymous namespace
1212namespace {
1213at::Tensor & wrapper_CPU_out_abs_out(const at::Tensor & self, at::Tensor & out) {
1214 // No device check
1215 // DeviceGuard omitted
1216 return at::native::abs_out(self, out);
1217}
1218} // anonymous namespace
1219namespace {
1220at::Tensor wrapper_CPU__angle(const at::Tensor & self) {
1221 // No device check
1222 // DeviceGuard omitted
1223 return at::native::angle(self);
1224}
1225} // anonymous namespace
1226namespace {
1227at::Tensor & wrapper_CPU_out_angle_out(const at::Tensor & self, at::Tensor & out) {
1228 // No device check
1229 // DeviceGuard omitted
1230 return at::native::angle_out(self, out);
1231}
1232} // anonymous namespace
1233namespace {
1234at::Tensor wrapper_CPU__view_as_real(const at::Tensor & self) {
1235 // No device check
1236 // DeviceGuard omitted
1237 return at::native::view_as_real(self);
1238}
1239} // anonymous namespace
1240namespace {
1241at::Tensor wrapper_CPU__view_as_complex(const at::Tensor & self) {
1242 // No device check
1243 // DeviceGuard omitted
1244 return at::native::view_as_complex(self);
1245}
1246} // anonymous namespace
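// For every structured operator below, the codegen emits three small classes
// that differ only in how they obtain their output(s):
//   * ..._functional allocates fresh outputs with create_out,
//   * ..._out wraps a caller-provided tensor, using resize_out and, when the
//     requested strides cannot be honored, a proxy from maybe_create_proxy,
//   * ..._inplace reuses `self`, validated by check_inplace.
// The accompanying wrapper_CPU_* functions drive them in two steps: meta()
// computes output metadata (triggering set_output_*), then impl() runs the
// actual CPU kernel; any proxy output is copied back at the end.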
1247struct structured_sgn_out_functional final : public at::native::structured_sgn_out {
1248 void set_output_strided(
1249 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1250 TensorOptions options, DimnameList names
1251 ) override {
1252 outputs_[output_idx] = create_out(sizes, strides, options);
1253 if (!names.empty()) {
1254 namedinference::propagate_names(*outputs_[output_idx], names);
1255 }
1256 // super must happen after, so that downstream can use maybe_get_output
1257 // to retrieve the output
1258 at::native::structured_sgn_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
1259 }
1260 void set_output_raw_strided(
1261 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1262 TensorOptions options, DimnameList names
1263 ) override {
1264 outputs_[output_idx] = create_out(sizes, strides, options);
1265 if (!names.empty()) {
1266 namedinference::propagate_names(*outputs_[output_idx], names);
1267 }
1268 // super must happen after, so that downstream can use maybe_get_output
1269 // to retrieve the output
1270 at::native::structured_sgn_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
1271 }
1272 const Tensor& maybe_get_output(int64_t output_idx) override {
1273 return *outputs_[output_idx];
1274 }
1275 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
1276};
1277at::Tensor wrapper_CPU_sgn(const at::Tensor & self) {
1278structured_sgn_out_functional op;
1279op.meta(self);
1280op.impl(self, *op.outputs_[0]);
1281return std::move(op.outputs_[0]).take();
1282}
1283struct structured_sgn_out_out final : public at::native::structured_sgn_out {
1284 structured_sgn_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
1285 void set_output_strided(
1286 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1287 TensorOptions options, DimnameList names
1288 ) override {
1289 const auto& out = outputs_[output_idx].get();
1290 resize_out(out, sizes, strides, options);
1291 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
1292 if (C10_UNLIKELY(maybe_proxy.has_value())) {
1293 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
1294 }
1295 if (!names.empty()) {
1296 namedinference::propagate_names(outputs_[output_idx], names);
1297 }
1298 // super must happen after, so that downstream can use maybe_get_output
1299 // to retrieve the output
1300 at::native::structured_sgn_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
1301 }
1302 void set_output_raw_strided(
1303 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1304 TensorOptions options, DimnameList names
1305 ) override {
1306 const auto& out = outputs_[output_idx].get();
1307 resize_out(out, sizes, strides, options);
1308 if (!names.empty()) {
1309 namedinference::propagate_names(outputs_[output_idx], names);
1310 }
1311 // super must happen after, so that downstream can use maybe_get_output
1312 // to retrieve the output
1313 at::native::structured_sgn_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
1314 }
1315 const Tensor& maybe_get_output(int64_t output_idx) override {
1316 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
1317 }
1318 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
1319 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
1320};
1321at::Tensor & wrapper_CPU_sgn_out_out(const at::Tensor & self, at::Tensor & out) {
1322structured_sgn_out_out op(out);
1323op.meta(self);
1324op.impl(self, op.maybe_get_output(0));
1325if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
1326return out;
1327}
1328struct structured_sgn_out_inplace final : public at::native::structured_sgn_out {
1329 structured_sgn_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
1330 void set_output_strided(
1331 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1332 TensorOptions options, DimnameList names
1333 ) override {
1334 const auto& out = outputs_[output_idx].get();
1335 check_inplace(out, sizes, options);
1336 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
1337 if (C10_UNLIKELY(maybe_proxy.has_value())) {
1338 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
1339 }
1340 if (!names.empty()) {
1341 namedinference::propagate_names(outputs_[output_idx], names);
1342 }
1343 // super must happen after, so that downstream can use maybe_get_output
1344 // to retrieve the output
1345 at::native::structured_sgn_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
1346 }
1347 void set_output_raw_strided(
1348 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1349 TensorOptions options, DimnameList names
1350 ) override {
1351 const auto& out = outputs_[output_idx].get();
1352 check_inplace(out, sizes, options);
1353 if (!names.empty()) {
1354 namedinference::propagate_names(outputs_[output_idx], names);
1355 }
1356 // super must happen after, so that downstream can use maybe_get_output
1357 // to retrieve the output
1358 at::native::structured_sgn_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
1359 }
1360 const Tensor& maybe_get_output(int64_t output_idx) override {
1361 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
1362 }
1363 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
1364 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
1365};
1366at::Tensor & wrapper_CPU_sgn_(at::Tensor & self) {
1367structured_sgn_out_inplace op(self);
1368op.meta(self);
1369op.impl(self, op.outputs_[0]);
1370if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
1371return self;
1372}
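// Editorial sketch, not generated: shows how the three sgn wrappers above map
// onto the public entry points (at::sgn, at::sgn_out, Tensor::sgn_) once the
// dispatcher has routed a call to the CPU backend. `sketch_sgn_variants` is a
// hypothetical helper and is never called.
[[maybe_unused]] static void sketch_sgn_variants(Tensor& self, Tensor& out) {
  Tensor fresh = wrapper_CPU_sgn(self);     // functional: allocates its own output
  wrapper_CPU_sgn_out_out(self, out);       // out=: writes into (and may resize) `out`
  wrapper_CPU_sgn_(self);                   // in-place: overwrites `self`
  (void)fresh;
}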
1373namespace {
1374at::Tensor & wrapper_CPU_out_conj_physical_out(const at::Tensor & self, at::Tensor & out) {
1375 // No device check
1376 // DeviceGuard omitted
1377 return at::native::conj_physical_out(self, out);
1378}
1379} // anonymous namespace
1380struct structured_acos_out_functional final : public at::native::structured_acos_out {
1381 void set_output_strided(
1382 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1383 TensorOptions options, DimnameList names
1384 ) override {
1385 outputs_[output_idx] = create_out(sizes, strides, options);
1386 if (!names.empty()) {
1387 namedinference::propagate_names(*outputs_[output_idx], names);
1388 }
1389 // super must happen after, so that downstream can use maybe_get_output
1390 // to retrieve the output
1391 at::native::structured_acos_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
1392 }
1393 void set_output_raw_strided(
1394 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1395 TensorOptions options, DimnameList names
1396 ) override {
1397 outputs_[output_idx] = create_out(sizes, strides, options);
1398 if (!names.empty()) {
1399 namedinference::propagate_names(*outputs_[output_idx], names);
1400 }
1401 // super must happen after, so that downstream can use maybe_get_output
1402 // to retrieve the output
1403 at::native::structured_acos_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
1404 }
1405 const Tensor& maybe_get_output(int64_t output_idx) override {
1406 return *outputs_[output_idx];
1407 }
1408 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
1409};
1410at::Tensor wrapper_CPU_acos(const at::Tensor & self) {
1411structured_acos_out_functional op;
1412op.meta(self);
1413op.impl(self, *op.outputs_[0]);
1414return std::move(op.outputs_[0]).take();
1415}
1416struct structured_acos_out_out final : public at::native::structured_acos_out {
1417 structured_acos_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
1418 void set_output_strided(
1419 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1420 TensorOptions options, DimnameList names
1421 ) override {
1422 const auto& out = outputs_[output_idx].get();
1423 resize_out(out, sizes, strides, options);
1424 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
1425 if (C10_UNLIKELY(maybe_proxy.has_value())) {
1426 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
1427 }
1428 if (!names.empty()) {
1429 namedinference::propagate_names(outputs_[output_idx], names);
1430 }
1431 // super must happen after, so that downstream can use maybe_get_output
1432 // to retrieve the output
1433 at::native::structured_acos_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
1434 }
1435 void set_output_raw_strided(
1436 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1437 TensorOptions options, DimnameList names
1438 ) override {
1439 const auto& out = outputs_[output_idx].get();
1440 resize_out(out, sizes, strides, options);
1441 if (!names.empty()) {
1442 namedinference::propagate_names(outputs_[output_idx], names);
1443 }
1444 // super must happen after, so that downstream can use maybe_get_output
1445 // to retrieve the output
1446 at::native::structured_acos_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
1447 }
1448 const Tensor& maybe_get_output(int64_t output_idx) override {
1449 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
1450 }
1451 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
1452 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
1453};
1454at::Tensor & wrapper_CPU_acos_out_out(const at::Tensor & self, at::Tensor & out) {
1455structured_acos_out_out op(out);
1456op.meta(self);
1457op.impl(self, op.maybe_get_output(0));
1458if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
1459return out;
1460}
1461struct structured_acos_out_inplace final : public at::native::structured_acos_out {
1462 structured_acos_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
1463 void set_output_strided(
1464 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1465 TensorOptions options, DimnameList names
1466 ) override {
1467 const auto& out = outputs_[output_idx].get();
1468 check_inplace(out, sizes, options);
1469 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
1470 if (C10_UNLIKELY(maybe_proxy.has_value())) {
1471 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
1472 }
1473 if (!names.empty()) {
1474 namedinference::propagate_names(outputs_[output_idx], names);
1475 }
1476 // super must happen after, so that downstream can use maybe_get_output
1477 // to retrieve the output
1478 at::native::structured_acos_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
1479 }
1480 void set_output_raw_strided(
1481 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1482 TensorOptions options, DimnameList names
1483 ) override {
1484 const auto& out = outputs_[output_idx].get();
1485 check_inplace(out, sizes, options);
1486 if (!names.empty()) {
1487 namedinference::propagate_names(outputs_[output_idx], names);
1488 }
1489 // super must happen after, so that downstream can use maybe_get_output
1490 // to retrieve the output
1491 at::native::structured_acos_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
1492 }
1493 const Tensor& maybe_get_output(int64_t output_idx) override {
1494 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
1495 }
1496 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
1497 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
1498};
1499at::Tensor & wrapper_CPU_acos_(at::Tensor & self) {
1500structured_acos_out_inplace op(self);
1501op.meta(self);
1502op.impl(self, op.outputs_[0]);
1503if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
1504return self;
1505}
1506struct structured_ufunc_add_CPU_functional final : public at::native::structured_ufunc_add_CPU {
1507 void set_output_strided(
1508 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1509 TensorOptions options, DimnameList names
1510 ) override {
1511 outputs_[output_idx] = create_out(sizes, strides, options);
1512 if (!names.empty()) {
1513 namedinference::propagate_names(*outputs_[output_idx], names);
1514 }
1515 // super must happen after, so that downstream can use maybe_get_output
1516 // to retrieve the output
1517 at::native::structured_ufunc_add_CPU::set_output_raw_strided(output_idx, sizes, strides, options, names);
1518 }
1519 void set_output_raw_strided(
1520 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1521 TensorOptions options, DimnameList names
1522 ) override {
1523 outputs_[output_idx] = create_out(sizes, strides, options);
1524 if (!names.empty()) {
1525 namedinference::propagate_names(*outputs_[output_idx], names);
1526 }
1527 // super must happen after, so that downstream can use maybe_get_output
1528 // to retrieve the output
1529 at::native::structured_ufunc_add_CPU::set_output_raw_strided(output_idx, sizes, strides, options, names);
1530 }
1531 const Tensor& maybe_get_output(int64_t output_idx) override {
1532 return *outputs_[output_idx];
1533 }
1534 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
1535};
1536at::Tensor wrapper_CPU_add_Tensor(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
1537structured_ufunc_add_CPU_functional op;
1538op.meta(self, other, alpha);
1539op.impl(self, other, alpha, *op.outputs_[0]);
1540return std::move(op.outputs_[0]).take();
1541}
1542struct structured_ufunc_add_CPU_out final : public at::native::structured_ufunc_add_CPU {
1543 structured_ufunc_add_CPU_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
1544 void set_output_strided(
1545 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1546 TensorOptions options, DimnameList names
1547 ) override {
1548 const auto& out = outputs_[output_idx].get();
1549 resize_out(out, sizes, strides, options);
1550 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
1551 if (C10_UNLIKELY(maybe_proxy.has_value())) {
1552 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
1553 }
1554 if (!names.empty()) {
1555 namedinference::propagate_names(outputs_[output_idx], names);
1556 }
1557 // super must happen after, so that downstream can use maybe_get_output
1558 // to retrieve the output
1559 at::native::structured_ufunc_add_CPU::set_output_raw_strided(output_idx, sizes, strides, options, names);
1560 }
1561 void set_output_raw_strided(
1562 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1563 TensorOptions options, DimnameList names
1564 ) override {
1565 const auto& out = outputs_[output_idx].get();
1566 resize_out(out, sizes, strides, options);
1567 if (!names.empty()) {
1568 namedinference::propagate_names(outputs_[output_idx], names);
1569 }
1570 // super must happen after, so that downstream can use maybe_get_output
1571 // to retrieve the output
1572 at::native::structured_ufunc_add_CPU::set_output_raw_strided(output_idx, sizes, strides, options, names);
1573 }
1574 const Tensor& maybe_get_output(int64_t output_idx) override {
1575 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
1576 }
1577 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
1578 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
1579};
1580at::Tensor & wrapper_CPU_add_out_out(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
1581structured_ufunc_add_CPU_out op(out);
1582op.meta(self, other, alpha);
1583op.impl(self, other, alpha, op.maybe_get_output(0));
1584if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
1585return out;
1586}
1587struct structured_ufunc_add_CPU_inplace final : public at::native::structured_ufunc_add_CPU {
1588 structured_ufunc_add_CPU_inplace(Tensor& self) : outputs_{std::ref(self)} {}
1589 void set_output_strided(
1590 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1591 TensorOptions options, DimnameList names
1592 ) override {
1593 const auto& out = outputs_[output_idx].get();
1594 check_inplace(out, sizes, options);
1595 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
1596 if (C10_UNLIKELY(maybe_proxy.has_value())) {
1597 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
1598 }
1599 if (!names.empty()) {
1600 namedinference::propagate_names(outputs_[output_idx], names);
1601 }
1602 // super must happen after, so that downstream can use maybe_get_output
1603 // to retrieve the output
1604 at::native::structured_ufunc_add_CPU::set_output_raw_strided(output_idx, sizes, strides, options, names);
1605 }
1606 void set_output_raw_strided(
1607 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1608 TensorOptions options, DimnameList names
1609 ) override {
1610 const auto& out = outputs_[output_idx].get();
1611 check_inplace(out, sizes, options);
1612 if (!names.empty()) {
1613 namedinference::propagate_names(outputs_[output_idx], names);
1614 }
1615 // super must happen after, so that downstream can use maybe_get_output
1616 // to retrieve the output
1617 at::native::structured_ufunc_add_CPU::set_output_raw_strided(output_idx, sizes, strides, options, names);
1618 }
1619 const Tensor& maybe_get_output(int64_t output_idx) override {
1620 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
1621 }
1622 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
1623 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
1624};
1625at::Tensor & wrapper_CPU_add__Tensor(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
1626structured_ufunc_add_CPU_inplace op(self);
1627op.meta(self, other, alpha);
1628op.impl(self, other, alpha, op.outputs_[0]);
1629if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
1630return self;
1631}
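// The three add wrappers above are backed by a single ufunc-generated
// structured kernel (structured_ufunc_add_CPU); meta() and impl() take the
// extra `alpha` argument because at::add computes `self + alpha * other`.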
1632namespace {
1633at::Tensor wrapper_CPU_Tensor__add_relu(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
1634 // No device check
1635 // DeviceGuard omitted
1636 return at::native::add_relu(self, other, alpha);
1637}
1638} // anonymous namespace
1639namespace {
1640at::Tensor & wrapper_CPU_out__add_relu_out(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
1641 // No device check
1642 // DeviceGuard omitted
1643 return at::native::add_relu_out(self, other, alpha, out);
1644}
1645} // anonymous namespace
1646namespace {
1647at::Tensor & wrapper_CPU_Tensor__add_relu_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
1648 // No device check
1649 // DeviceGuard omitted
1650 return at::native::add_relu_(self, other, alpha);
1651}
1652} // anonymous namespace
1653namespace {
1654at::Tensor wrapper_CPU_Scalar__add_relu(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
1655 // No device check
1656 // DeviceGuard omitted
1657 return at::native::add_relu(self, other, alpha);
1658}
1659} // anonymous namespace
1660namespace {
1661at::Tensor & wrapper_CPU_Scalar__add_relu_(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
1662 // No device check
1663 // DeviceGuard omitted
1664 return at::native::add_relu_(self, other, alpha);
1665}
1666} // anonymous namespace
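// Note: in the classes below (addmv, and later all/any/argmax/argmin) the
// "super must happen after" comment is not followed by a call to the meta
// class's set_output_raw_strided. This appears to be because these meta
// functions do not inherit from TensorIteratorBase, so there is no superclass
// set_output hook to invoke; the comment is emitted by the codegen regardless.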
1667struct structured_addmv_out_cpu_functional final : public at::native::structured_addmv_out_cpu {
1668 void set_output_strided(
1669 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1670 TensorOptions options, DimnameList names
1671 ) override {
1672 outputs_[output_idx] = create_out(sizes, strides, options);
1673 if (!names.empty()) {
1674 namedinference::propagate_names(*outputs_[output_idx], names);
1675 }
1676 // super must happen after, so that downstream can use maybe_get_output
1677 // to retrieve the output
1678 }
1679 void set_output_raw_strided(
1680 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1681 TensorOptions options, DimnameList names
1682 ) override {
1683 outputs_[output_idx] = create_out(sizes, strides, options);
1684 if (!names.empty()) {
1685 namedinference::propagate_names(*outputs_[output_idx], names);
1686 }
1687 // super must happen after, so that downstream can use maybe_get_output
1688 // to retrieve the output
1689 }
1690 const Tensor& maybe_get_output(int64_t output_idx) override {
1691 return *outputs_[output_idx];
1692 }
1693 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
1694};
1695at::Tensor wrapper_CPU_addmv(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
1696structured_addmv_out_cpu_functional op;
1697op.meta(self, mat, vec, beta, alpha);
1698op.impl(self, mat, vec, beta, alpha, *op.outputs_[0]);
1699return std::move(op.outputs_[0]).take();
1700}
1701struct structured_addmv_out_cpu_out final : public at::native::structured_addmv_out_cpu {
1702 structured_addmv_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
1703 void set_output_strided(
1704 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1705 TensorOptions options, DimnameList names
1706 ) override {
1707 const auto& out = outputs_[output_idx].get();
1708 resize_out(out, sizes, strides, options);
1709 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
1710 if (C10_UNLIKELY(maybe_proxy.has_value())) {
1711 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
1712 }
1713 if (!names.empty()) {
1714 namedinference::propagate_names(outputs_[output_idx], names);
1715 }
1716 // super must happen after, so that downstream can use maybe_get_output
1717 // to retrieve the output
1718 }
1719 void set_output_raw_strided(
1720 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1721 TensorOptions options, DimnameList names
1722 ) override {
1723 const auto& out = outputs_[output_idx].get();
1724 resize_out(out, sizes, strides, options);
1725 if (!names.empty()) {
1726 namedinference::propagate_names(outputs_[output_idx], names);
1727 }
1728 // super must happen after, so that downstream can use maybe_get_output
1729 // to retrieve the output
1730 }
1731 const Tensor& maybe_get_output(int64_t output_idx) override {
1732 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
1733 }
1734 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
1735 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
1736};
1737at::Tensor & wrapper_CPU_addmv_out_out(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
1738structured_addmv_out_cpu_out op(out);
1739op.meta(self, mat, vec, beta, alpha);
1740op.impl(self, mat, vec, beta, alpha, op.maybe_get_output(0));
1741if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
1742return out;
1743}
1744struct structured_addmv_out_cpu_inplace final : public at::native::structured_addmv_out_cpu {
1745 structured_addmv_out_cpu_inplace(Tensor& self) : outputs_{std::ref(self)} {}
1746 void set_output_strided(
1747 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1748 TensorOptions options, DimnameList names
1749 ) override {
1750 const auto& out = outputs_[output_idx].get();
1751 check_inplace(out, sizes, options);
1752 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
1753 if (C10_UNLIKELY(maybe_proxy.has_value())) {
1754 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
1755 }
1756 if (!names.empty()) {
1757 namedinference::propagate_names(outputs_[output_idx], names);
1758 }
1759 // super must happen after, so that downstream can use maybe_get_output
1760 // to retrieve the output
1761 }
1762 void set_output_raw_strided(
1763 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1764 TensorOptions options, DimnameList names
1765 ) override {
1766 const auto& out = outputs_[output_idx].get();
1767 check_inplace(out, sizes, options);
1768 if (!names.empty()) {
1769 namedinference::propagate_names(outputs_[output_idx], names);
1770 }
1771 // super must happen after, so that downstream can use maybe_get_output
1772 // to retrieve the output
1773 }
1774 const Tensor& maybe_get_output(int64_t output_idx) override {
1775 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
1776 }
1777 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
1778 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
1779};
1780at::Tensor & wrapper_CPU_addmv_(at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
1781structured_addmv_out_cpu_inplace op(self);
1782op.meta(self, mat, vec, beta, alpha);
1783op.impl(self, mat, vec, beta, alpha, op.outputs_[0]);
1784if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
1785return self;
1786}
1787namespace {
1788at::Tensor wrapper_CPU__addr(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) {
1789 // No device check
1790 // DeviceGuard omitted
1791 return at::native::addr(self, vec1, vec2, beta, alpha);
1792}
1793} // anonymous namespace
1794namespace {
1795at::Tensor & wrapper_CPU_out_addr_out(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
1796 // No device check
1797 // DeviceGuard omitted
1798 return at::native::addr_out(self, vec1, vec2, beta, alpha, out);
1799}
1800} // anonymous namespace
1801struct structured_all_out_functional final : public at::native::structured_all_out {
1802 void set_output_strided(
1803 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1804 TensorOptions options, DimnameList names
1805 ) override {
1806 outputs_[output_idx] = create_out(sizes, strides, options);
1807 if (!names.empty()) {
1808 namedinference::propagate_names(*outputs_[output_idx], names);
1809 }
1810 // super must happen after, so that downstream can use maybe_get_output
1811 // to retrieve the output
1812 }
1813 void set_output_raw_strided(
1814 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1815 TensorOptions options, DimnameList names
1816 ) override {
1817 outputs_[output_idx] = create_out(sizes, strides, options);
1818 if (!names.empty()) {
1819 namedinference::propagate_names(*outputs_[output_idx], names);
1820 }
1821 // super must happen after, so that downstream can use maybe_get_output
1822 // to retrieve the output
1823 }
1824 const Tensor& maybe_get_output(int64_t output_idx) override {
1825 return *outputs_[output_idx];
1826 }
1827 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
1828};
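// The all.dim/any.dim meta functions return a precompute struct: the wrappers
// below capture it and forward the canonicalized precompute.dim (rather than
// the raw user-supplied dim) into impl().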
1829at::Tensor wrapper_CPU_all_dim(const at::Tensor & self, int64_t dim, bool keepdim) {
1830structured_all_out_functional op;
1831auto precompute = op.meta(self, dim, keepdim);
1832(void)precompute;
1833op.impl(self, precompute.dim, keepdim, *op.outputs_[0]);
1834return std::move(op.outputs_[0]).take();
1835}
1836struct structured_all_out_out final : public at::native::structured_all_out {
1837 structured_all_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
1838 void set_output_strided(
1839 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1840 TensorOptions options, DimnameList names
1841 ) override {
1842 const auto& out = outputs_[output_idx].get();
1843 resize_out(out, sizes, strides, options);
1844 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
1845 if (C10_UNLIKELY(maybe_proxy.has_value())) {
1846 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
1847 }
1848 if (!names.empty()) {
1849 namedinference::propagate_names(outputs_[output_idx], names);
1850 }
1851 // super must happen after, so that downstream can use maybe_get_output
1852 // to retrieve the output
1853 }
1854 void set_output_raw_strided(
1855 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1856 TensorOptions options, DimnameList names
1857 ) override {
1858 const auto& out = outputs_[output_idx].get();
1859 resize_out(out, sizes, strides, options);
1860 if (!names.empty()) {
1861 namedinference::propagate_names(outputs_[output_idx], names);
1862 }
1863 // super must happen after, so that downstream can use maybe_get_output
1864 // to retrieve the output
1865 }
1866 const Tensor& maybe_get_output(int64_t output_idx) override {
1867 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
1868 }
1869 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
1870 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
1871};
1872at::Tensor & wrapper_CPU_all_out_out(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out) {
1873structured_all_out_out op(out);
1874auto precompute = op.meta(self, dim, keepdim);
1875(void)precompute;
1876op.impl(self, precompute.dim, keepdim, op.maybe_get_output(0));
1877if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
1878return out;
1879}
1880struct structured_any_out_functional final : public at::native::structured_any_out {
1881 void set_output_strided(
1882 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1883 TensorOptions options, DimnameList names
1884 ) override {
1885 outputs_[output_idx] = create_out(sizes, strides, options);
1886 if (!names.empty()) {
1887 namedinference::propagate_names(*outputs_[output_idx], names);
1888 }
1889 // super must happen after, so that downstream can use maybe_get_output
1890 // to retrieve the output
1891 }
1892 void set_output_raw_strided(
1893 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1894 TensorOptions options, DimnameList names
1895 ) override {
1896 outputs_[output_idx] = create_out(sizes, strides, options);
1897 if (!names.empty()) {
1898 namedinference::propagate_names(*outputs_[output_idx], names);
1899 }
1900 // super must happen after, so that downstream can use maybe_get_output
1901 // to retrieve the output
1902 }
1903 const Tensor& maybe_get_output(int64_t output_idx) override {
1904 return *outputs_[output_idx];
1905 }
1906 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
1907};
1908at::Tensor wrapper_CPU_any_dim(const at::Tensor & self, int64_t dim, bool keepdim) {
1909structured_any_out_functional op;
1910auto precompute = op.meta(self, dim, keepdim);
1911(void)precompute;
1912op.impl(self, precompute.dim, keepdim, *op.outputs_[0]);
1913return std::move(op.outputs_[0]).take();
1914}
1915struct structured_any_out_out final : public at::native::structured_any_out {
1916 structured_any_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
1917 void set_output_strided(
1918 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1919 TensorOptions options, DimnameList names
1920 ) override {
1921 const auto& out = outputs_[output_idx].get();
1922 resize_out(out, sizes, strides, options);
1923 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
1924 if (C10_UNLIKELY(maybe_proxy.has_value())) {
1925 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
1926 }
1927 if (!names.empty()) {
1928 namedinference::propagate_names(outputs_[output_idx], names);
1929 }
1930 // super must happen after, so that downstream can use maybe_get_output
1931 // to retrieve the output
1932 }
1933 void set_output_raw_strided(
1934 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1935 TensorOptions options, DimnameList names
1936 ) override {
1937 const auto& out = outputs_[output_idx].get();
1938 resize_out(out, sizes, strides, options);
1939 if (!names.empty()) {
1940 namedinference::propagate_names(outputs_[output_idx], names);
1941 }
1942 // super must happen after, so that downstream can use maybe_get_output
1943 // to retrieve the output
1944 }
1945 const Tensor& maybe_get_output(int64_t output_idx) override {
1946 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
1947 }
1948 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
1949 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
1950};
1951at::Tensor & wrapper_CPU_any_out_out(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out) {
1952structured_any_out_out op(out);
1953auto precompute = op.meta(self, dim, keepdim);
1954(void)precompute;
1955op.impl(self, precompute.dim, keepdim, op.maybe_get_output(0));
1956if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
1957return out;
1958}
1959namespace {
1960at::Tensor & wrapper_CPU_start_out_arange_out(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out) {
1961 // No device check
1962 // DeviceGuard omitted
1963 return at::native::arange_out(start, end, step, out);
1964}
1965} // anonymous namespace
1966struct structured_argmax_out_functional final : public at::native::structured_argmax_out {
1967 void set_output_strided(
1968 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1969 TensorOptions options, DimnameList names
1970 ) override {
1971 outputs_[output_idx] = create_out(sizes, strides, options);
1972 if (!names.empty()) {
1973 namedinference::propagate_names(*outputs_[output_idx], names);
1974 }
1975 // super must happen after, so that downstream can use maybe_get_output
1976 // to retrieve the output
1977 }
1978 void set_output_raw_strided(
1979 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
1980 TensorOptions options, DimnameList names
1981 ) override {
1982 outputs_[output_idx] = create_out(sizes, strides, options);
1983 if (!names.empty()) {
1984 namedinference::propagate_names(*outputs_[output_idx], names);
1985 }
1986 // super must happen after, so that downstream can use maybe_get_output
1987 // to retrieve the output
1988 }
1989 const Tensor& maybe_get_output(int64_t output_idx) override {
1990 return *outputs_[output_idx];
1991 }
1992 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
1993};
1994at::Tensor wrapper_CPU_argmax(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {
1995structured_argmax_out_functional op;
1996op.meta(self, dim, keepdim);
1997op.impl(self, dim, keepdim, *op.outputs_[0]);
1998return std::move(op.outputs_[0]).take();
1999}
2000struct structured_argmax_out_out final : public at::native::structured_argmax_out {
2001 structured_argmax_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
2002 void set_output_strided(
2003 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2004 TensorOptions options, DimnameList names
2005 ) override {
2006 const auto& out = outputs_[output_idx].get();
2007 resize_out(out, sizes, strides, options);
2008 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
2009 if (C10_UNLIKELY(maybe_proxy.has_value())) {
2010 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
2011 }
2012 if (!names.empty()) {
2013 namedinference::propagate_names(outputs_[output_idx], names);
2014 }
2015 // super must happen after, so that downstream can use maybe_get_output
2016 // to retrieve the output
2017 }
2018 void set_output_raw_strided(
2019 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2020 TensorOptions options, DimnameList names
2021 ) override {
2022 const auto& out = outputs_[output_idx].get();
2023 resize_out(out, sizes, strides, options);
2024 if (!names.empty()) {
2025 namedinference::propagate_names(outputs_[output_idx], names);
2026 }
2027 // super must happen after, so that downstream can use maybe_get_output
2028 // to retrieve the output
2029 }
2030 const Tensor& maybe_get_output(int64_t output_idx) override {
2031 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
2032 }
2033 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
2034 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
2035};
2036at::Tensor & wrapper_CPU_argmax_out_out(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, at::Tensor & out) {
2037structured_argmax_out_out op(out);
2038op.meta(self, dim, keepdim);
2039op.impl(self, dim, keepdim, op.maybe_get_output(0));
2040if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
2041return out;
2042}
2043struct structured_argmin_out_functional final : public at::native::structured_argmin_out {
2044 void set_output_strided(
2045 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2046 TensorOptions options, DimnameList names
2047 ) override {
2048 outputs_[output_idx] = create_out(sizes, strides, options);
2049 if (!names.empty()) {
2050 namedinference::propagate_names(*outputs_[output_idx], names);
2051 }
2052 // super must happen after, so that downstream can use maybe_get_output
2053 // to retrieve the output
2054 }
2055 void set_output_raw_strided(
2056 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2057 TensorOptions options, DimnameList names
2058 ) override {
2059 outputs_[output_idx] = create_out(sizes, strides, options);
2060 if (!names.empty()) {
2061 namedinference::propagate_names(*outputs_[output_idx], names);
2062 }
2063 // super must happen after, so that downstream can use maybe_get_output
2064 // to retrieve the output
2065 }
2066 const Tensor& maybe_get_output(int64_t output_idx) override {
2067 return *outputs_[output_idx];
2068 }
2069 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
2070};
2071at::Tensor wrapper_CPU_argmin(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {
2072structured_argmin_out_functional op;
2073op.meta(self, dim, keepdim);
2074op.impl(self, dim, keepdim, *op.outputs_[0]);
2075return std::move(op.outputs_[0]).take();
2076}
2077struct structured_argmin_out_out final : public at::native::structured_argmin_out {
2078 structured_argmin_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
2079 void set_output_strided(
2080 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2081 TensorOptions options, DimnameList names
2082 ) override {
2083 const auto& out = outputs_[output_idx].get();
2084 resize_out(out, sizes, strides, options);
2085 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
2086 if (C10_UNLIKELY(maybe_proxy.has_value())) {
2087 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
2088 }
2089 if (!names.empty()) {
2090 namedinference::propagate_names(outputs_[output_idx], names);
2091 }
2092 // super must happen after, so that downstream can use maybe_get_output
2093 // to retrieve the output
2094 }
2095 void set_output_raw_strided(
2096 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2097 TensorOptions options, DimnameList names
2098 ) override {
2099 const auto& out = outputs_[output_idx].get();
2100 resize_out(out, sizes, strides, options);
2101 if (!names.empty()) {
2102 namedinference::propagate_names(outputs_[output_idx], names);
2103 }
2104 // super must happen after, so that downstream can use maybe_get_output
2105 // to retrieve the output
2106 }
2107 const Tensor& maybe_get_output(int64_t output_idx) override {
2108 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
2109 }
2110 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
2111 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
2112};
2113at::Tensor & wrapper_CPU_argmin_out_out(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, at::Tensor & out) {
2114structured_argmin_out_out op(out);
2115op.meta(self, dim, keepdim);
2116op.impl(self, dim, keepdim, op.maybe_get_output(0));
2117if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
2118return out;
2119}
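// acosh/asinh/atanh below return to the pattern used by sgn/acos/add above:
// their set_output_* overrides again finish by calling the meta class's
// set_output_raw_strided.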
2120struct structured_acosh_out_functional final : public at::native::structured_acosh_out {
2121 void set_output_strided(
2122 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2123 TensorOptions options, DimnameList names
2124 ) override {
2125 outputs_[output_idx] = create_out(sizes, strides, options);
2126 if (!names.empty()) {
2127 namedinference::propagate_names(*outputs_[output_idx], names);
2128 }
2129 // super must happen after, so that downstream can use maybe_get_output
2130 // to retrieve the output
2131 at::native::structured_acosh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2132 }
2133 void set_output_raw_strided(
2134 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2135 TensorOptions options, DimnameList names
2136 ) override {
2137 outputs_[output_idx] = create_out(sizes, strides, options);
2138 if (!names.empty()) {
2139 namedinference::propagate_names(*outputs_[output_idx], names);
2140 }
2141 // super must happen after, so that downstream can use maybe_get_output
2142 // to retrieve the output
2143 at::native::structured_acosh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2144 }
2145 const Tensor& maybe_get_output(int64_t output_idx) override {
2146 return *outputs_[output_idx];
2147 }
2148 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
2149};
2150at::Tensor wrapper_CPU_acosh(const at::Tensor & self) {
2151structured_acosh_out_functional op;
2152op.meta(self);
2153op.impl(self, *op.outputs_[0]);
2154return std::move(op.outputs_[0]).take();
2155}
2156struct structured_acosh_out_out final : public at::native::structured_acosh_out {
2157 structured_acosh_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
2158 void set_output_strided(
2159 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2160 TensorOptions options, DimnameList names
2161 ) override {
2162 const auto& out = outputs_[output_idx].get();
2163 resize_out(out, sizes, strides, options);
2164 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
2165 if (C10_UNLIKELY(maybe_proxy.has_value())) {
2166 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
2167 }
2168 if (!names.empty()) {
2169 namedinference::propagate_names(outputs_[output_idx], names);
2170 }
2171 // super must happen after, so that downstream can use maybe_get_output
2172 // to retrieve the output
2173 at::native::structured_acosh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2174 }
2175 void set_output_raw_strided(
2176 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2177 TensorOptions options, DimnameList names
2178 ) override {
2179 const auto& out = outputs_[output_idx].get();
2180 resize_out(out, sizes, strides, options);
2181 if (!names.empty()) {
2182 namedinference::propagate_names(outputs_[output_idx], names);
2183 }
2184 // super must happen after, so that downstream can use maybe_get_output
2185 // to retrieve the output
2186 at::native::structured_acosh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2187 }
2188 const Tensor& maybe_get_output(int64_t output_idx) override {
2189 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
2190 }
2191 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
2192 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
2193};
2194at::Tensor & wrapper_CPU_acosh_out_out(const at::Tensor & self, at::Tensor & out) {
2195structured_acosh_out_out op(out);
2196op.meta(self);
2197op.impl(self, op.maybe_get_output(0));
2198if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
2199return out;
2200}
2201struct structured_acosh_out_inplace final : public at::native::structured_acosh_out {
2202 structured_acosh_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
2203 void set_output_strided(
2204 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2205 TensorOptions options, DimnameList names
2206 ) override {
2207 const auto& out = outputs_[output_idx].get();
2208 check_inplace(out, sizes, options);
2209 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
2210 if (C10_UNLIKELY(maybe_proxy.has_value())) {
2211 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
2212 }
2213 if (!names.empty()) {
2214 namedinference::propagate_names(outputs_[output_idx], names);
2215 }
2216 // super must happen after, so that downstream can use maybe_get_output
2217 // to retrieve the output
2218 at::native::structured_acosh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2219 }
2220 void set_output_raw_strided(
2221 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2222 TensorOptions options, DimnameList names
2223 ) override {
2224 const auto& out = outputs_[output_idx].get();
2225 check_inplace(out, sizes, options);
2226 if (!names.empty()) {
2227 namedinference::propagate_names(outputs_[output_idx], names);
2228 }
2229 // super must happen after, so that downstream can use maybe_get_output
2230 // to retrieve the output
2231 at::native::structured_acosh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2232 }
2233 const Tensor& maybe_get_output(int64_t output_idx) override {
2234 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
2235 }
2236 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
2237 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
2238};
2239at::Tensor & wrapper_CPU_acosh_(at::Tensor & self) {
2240structured_acosh_out_inplace op(self);
2241op.meta(self);
2242op.impl(self, op.outputs_[0]);
2243if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
2244return self;
2245}
2246struct structured_asinh_out_functional final : public at::native::structured_asinh_out {
2247 void set_output_strided(
2248 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2249 TensorOptions options, DimnameList names
2250 ) override {
2251 outputs_[output_idx] = create_out(sizes, strides, options);
2252 if (!names.empty()) {
2253 namedinference::propagate_names(*outputs_[output_idx], names);
2254 }
2255 // super must happen after, so that downstream can use maybe_get_output
2256 // to retrieve the output
2257 at::native::structured_asinh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2258 }
2259 void set_output_raw_strided(
2260 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2261 TensorOptions options, DimnameList names
2262 ) override {
2263 outputs_[output_idx] = create_out(sizes, strides, options);
2264 if (!names.empty()) {
2265 namedinference::propagate_names(*outputs_[output_idx], names);
2266 }
2267 // super must happen after, so that downstream can use maybe_get_output
2268 // to retrieve the output
2269 at::native::structured_asinh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2270 }
2271 const Tensor& maybe_get_output(int64_t output_idx) override {
2272 return *outputs_[output_idx];
2273 }
2274 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
2275};
2276at::Tensor wrapper_CPU_asinh(const at::Tensor & self) {
2277structured_asinh_out_functional op;
2278op.meta(self);
2279op.impl(self, *op.outputs_[0]);
2280return std::move(op.outputs_[0]).take();
2281}
2282struct structured_asinh_out_out final : public at::native::structured_asinh_out {
2283 structured_asinh_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
2284 void set_output_strided(
2285 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2286 TensorOptions options, DimnameList names
2287 ) override {
2288 const auto& out = outputs_[output_idx].get();
2289 resize_out(out, sizes, strides, options);
2290 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
2291 if (C10_UNLIKELY(maybe_proxy.has_value())) {
2292 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
2293 }
2294 if (!names.empty()) {
2295 namedinference::propagate_names(outputs_[output_idx], names);
2296 }
2297 // super must happen after, so that downstream can use maybe_get_output
2298 // to retrieve the output
2299 at::native::structured_asinh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2300 }
2301 void set_output_raw_strided(
2302 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2303 TensorOptions options, DimnameList names
2304 ) override {
2305 const auto& out = outputs_[output_idx].get();
2306 resize_out(out, sizes, strides, options);
2307 if (!names.empty()) {
2308 namedinference::propagate_names(outputs_[output_idx], names);
2309 }
2310 // super must happen after, so that downstream can use maybe_get_output
2311 // to retrieve the output
2312 at::native::structured_asinh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2313 }
2314 const Tensor& maybe_get_output(int64_t output_idx) override {
2315 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
2316 }
2317 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
2318 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
2319};
2320at::Tensor & wrapper_CPU_asinh_out_out(const at::Tensor & self, at::Tensor & out) {
2321structured_asinh_out_out op(out);
2322op.meta(self);
2323op.impl(self, op.maybe_get_output(0));
2324if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
2325return out;
2326}
2327struct structured_asinh_out_inplace final : public at::native::structured_asinh_out {
2328 structured_asinh_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
2329 void set_output_strided(
2330 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2331 TensorOptions options, DimnameList names
2332 ) override {
2333 const auto& out = outputs_[output_idx].get();
2334 check_inplace(out, sizes, options);
2335 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
2336 if (C10_UNLIKELY(maybe_proxy.has_value())) {
2337 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
2338 }
2339 if (!names.empty()) {
2340 namedinference::propagate_names(outputs_[output_idx], names);
2341 }
2342 // super must happen after, so that downstream can use maybe_get_output
2343 // to retrieve the output
2344 at::native::structured_asinh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2345 }
2346 void set_output_raw_strided(
2347 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2348 TensorOptions options, DimnameList names
2349 ) override {
2350 const auto& out = outputs_[output_idx].get();
2351 check_inplace(out, sizes, options);
2352 if (!names.empty()) {
2353 namedinference::propagate_names(outputs_[output_idx], names);
2354 }
2355 // super must happen after, so that downstream can use maybe_get_output
2356 // to retrieve the output
2357 at::native::structured_asinh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2358 }
2359 const Tensor& maybe_get_output(int64_t output_idx) override {
2360 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
2361 }
2362 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
2363 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
2364};
2365at::Tensor & wrapper_CPU_asinh_(at::Tensor & self) {
2366structured_asinh_out_inplace op(self);
2367op.meta(self);
2368op.impl(self, op.outputs_[0]);
2369if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
2370return self;
2371}
2372struct structured_atanh_out_functional final : public at::native::structured_atanh_out {
2373 void set_output_strided(
2374 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2375 TensorOptions options, DimnameList names
2376 ) override {
2377 outputs_[output_idx] = create_out(sizes, strides, options);
2378 if (!names.empty()) {
2379 namedinference::propagate_names(*outputs_[output_idx], names);
2380 }
2381 // super must happen after, so that downstream can use maybe_get_output
2382 // to retrieve the output
2383 at::native::structured_atanh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2384 }
2385 void set_output_raw_strided(
2386 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2387 TensorOptions options, DimnameList names
2388 ) override {
2389 outputs_[output_idx] = create_out(sizes, strides, options);
2390 if (!names.empty()) {
2391 namedinference::propagate_names(*outputs_[output_idx], names);
2392 }
2393 // super must happen after, so that downstream can use maybe_get_output
2394 // to retrieve the output
2395 at::native::structured_atanh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2396 }
2397 const Tensor& maybe_get_output(int64_t output_idx) override {
2398 return *outputs_[output_idx];
2399 }
2400 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
2401};
2402at::Tensor wrapper_CPU_atanh(const at::Tensor & self) {
2403structured_atanh_out_functional op;
2404op.meta(self);
2405op.impl(self, *op.outputs_[0]);
2406return std::move(op.outputs_[0]).take();
2407}
2408struct structured_atanh_out_out final : public at::native::structured_atanh_out {
2409 structured_atanh_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
2410 void set_output_strided(
2411 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2412 TensorOptions options, DimnameList names
2413 ) override {
2414 const auto& out = outputs_[output_idx].get();
2415 resize_out(out, sizes, strides, options);
2416 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
2417 if (C10_UNLIKELY(maybe_proxy.has_value())) {
2418 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
2419 }
2420 if (!names.empty()) {
2421 namedinference::propagate_names(outputs_[output_idx], names);
2422 }
2423 // super must happen after, so that downstream can use maybe_get_output
2424 // to retrieve the output
2425 at::native::structured_atanh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2426 }
2427 void set_output_raw_strided(
2428 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2429 TensorOptions options, DimnameList names
2430 ) override {
2431 const auto& out = outputs_[output_idx].get();
2432 resize_out(out, sizes, strides, options);
2433 if (!names.empty()) {
2434 namedinference::propagate_names(outputs_[output_idx], names);
2435 }
2436 // super must happen after, so that downstream can use maybe_get_output
2437 // to retrieve the output
2438 at::native::structured_atanh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2439 }
2440 const Tensor& maybe_get_output(int64_t output_idx) override {
2441 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
2442 }
2443 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
2444 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
2445};
2446at::Tensor & wrapper_CPU_atanh_out_out(const at::Tensor & self, at::Tensor & out) {
2447structured_atanh_out_out op(out);
2448op.meta(self);
2449op.impl(self, op.maybe_get_output(0));
2450if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
2451return out;
2452}
2453struct structured_atanh_out_inplace final : public at::native::structured_atanh_out {
2454 structured_atanh_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
2455 void set_output_strided(
2456 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2457 TensorOptions options, DimnameList names
2458 ) override {
2459 const auto& out = outputs_[output_idx].get();
2460 check_inplace(out, sizes, options);
2461 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
2462 if (C10_UNLIKELY(maybe_proxy.has_value())) {
2463 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
2464 }
2465 if (!names.empty()) {
2466 namedinference::propagate_names(outputs_[output_idx], names);
2467 }
2468 // super must happen after, so that downstream can use maybe_get_output
2469 // to retrieve the output
2470 at::native::structured_atanh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2471 }
2472 void set_output_raw_strided(
2473 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2474 TensorOptions options, DimnameList names
2475 ) override {
2476 const auto& out = outputs_[output_idx].get();
2477 check_inplace(out, sizes, options);
2478 if (!names.empty()) {
2479 namedinference::propagate_names(outputs_[output_idx], names);
2480 }
2481 // super must happen after, so that downstream can use maybe_get_output
2482 // to retrieve the output
2483 at::native::structured_atanh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2484 }
2485 const Tensor& maybe_get_output(int64_t output_idx) override {
2486 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
2487 }
2488 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
2489 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
2490};
2491at::Tensor & wrapper_CPU_atanh_(at::Tensor & self) {
2492structured_atanh_out_inplace op(self);
2493op.meta(self);
2494op.impl(self, op.outputs_[0]);
2495if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
2496return self;
2497}
2498namespace {
2499at::Tensor wrapper_CPU__as_strided(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {
2500 // No device check
2501 // DeviceGuard omitted
2502 return at::native::as_strided_tensorimpl(self, C10_AS_INTARRAYREF_SLOW(size), C10_AS_INTARRAYREF_SLOW(stride), storage_offset.has_value() ? c10::make_optional(storage_offset->expect_int()) : c10::nullopt);
2503}
2504} // anonymous namespace
2505struct structured_asin_out_functional final : public at::native::structured_asin_out {
2506 void set_output_strided(
2507 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2508 TensorOptions options, DimnameList names
2509 ) override {
2510 outputs_[output_idx] = create_out(sizes, strides, options);
2511 if (!names.empty()) {
2512 namedinference::propagate_names(*outputs_[output_idx], names);
2513 }
2514 // super must happen after, so that downstream can use maybe_get_output
2515 // to retrieve the output
2516 at::native::structured_asin_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2517 }
2518 void set_output_raw_strided(
2519 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2520 TensorOptions options, DimnameList names
2521 ) override {
2522 outputs_[output_idx] = create_out(sizes, strides, options);
2523 if (!names.empty()) {
2524 namedinference::propagate_names(*outputs_[output_idx], names);
2525 }
2526 // super must happen after, so that downstream can use maybe_get_output
2527 // to retrieve the output
2528 at::native::structured_asin_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2529 }
2530 const Tensor& maybe_get_output(int64_t output_idx) override {
2531 return *outputs_[output_idx];
2532 }
2533 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
2534};
2535at::Tensor wrapper_CPU_asin(const at::Tensor & self) {
2536structured_asin_out_functional op;
2537op.meta(self);
2538op.impl(self, *op.outputs_[0]);
2539return std::move(op.outputs_[0]).take();
2540}
2541struct structured_asin_out_out final : public at::native::structured_asin_out {
2542 structured_asin_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
2543 void set_output_strided(
2544 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2545 TensorOptions options, DimnameList names
2546 ) override {
2547 const auto& out = outputs_[output_idx].get();
2548 resize_out(out, sizes, strides, options);
2549 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
2550 if (C10_UNLIKELY(maybe_proxy.has_value())) {
2551 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
2552 }
2553 if (!names.empty()) {
2554 namedinference::propagate_names(outputs_[output_idx], names);
2555 }
2556 // super must happen after, so that downstream can use maybe_get_output
2557 // to retrieve the output
2558 at::native::structured_asin_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2559 }
2560 void set_output_raw_strided(
2561 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2562 TensorOptions options, DimnameList names
2563 ) override {
2564 const auto& out = outputs_[output_idx].get();
2565 resize_out(out, sizes, strides, options);
2566 if (!names.empty()) {
2567 namedinference::propagate_names(outputs_[output_idx], names);
2568 }
2569 // super must happen after, so that downstream can use maybe_get_output
2570 // to retrieve the output
2571 at::native::structured_asin_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2572 }
2573 const Tensor& maybe_get_output(int64_t output_idx) override {
2574 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
2575 }
2576 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
2577 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
2578};
2579at::Tensor & wrapper_CPU_asin_out_out(const at::Tensor & self, at::Tensor & out) {
2580structured_asin_out_out op(out);
2581op.meta(self);
2582op.impl(self, op.maybe_get_output(0));
2583if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
2584return out;
2585}
2586struct structured_asin_out_inplace final : public at::native::structured_asin_out {
2587 structured_asin_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
2588 void set_output_strided(
2589 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2590 TensorOptions options, DimnameList names
2591 ) override {
2592 const auto& out = outputs_[output_idx].get();
2593 check_inplace(out, sizes, options);
2594 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
2595 if (C10_UNLIKELY(maybe_proxy.has_value())) {
2596 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
2597 }
2598 if (!names.empty()) {
2599 namedinference::propagate_names(outputs_[output_idx], names);
2600 }
2601 // super must happen after, so that downstream can use maybe_get_output
2602 // to retrieve the output
2603 at::native::structured_asin_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2604 }
2605 void set_output_raw_strided(
2606 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2607 TensorOptions options, DimnameList names
2608 ) override {
2609 const auto& out = outputs_[output_idx].get();
2610 check_inplace(out, sizes, options);
2611 if (!names.empty()) {
2612 namedinference::propagate_names(outputs_[output_idx], names);
2613 }
2614 // super must happen after, so that downstream can use maybe_get_output
2615 // to retrieve the output
2616 at::native::structured_asin_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2617 }
2618 const Tensor& maybe_get_output(int64_t output_idx) override {
2619 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
2620 }
2621 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
2622 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
2623};
2624at::Tensor & wrapper_CPU_asin_(at::Tensor & self) {
2625structured_asin_out_inplace op(self);
2626op.meta(self);
2627op.impl(self, op.outputs_[0]);
2628if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
2629return self;
2630}
2631struct structured_atan_out_functional final : public at::native::structured_atan_out {
2632 void set_output_strided(
2633 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2634 TensorOptions options, DimnameList names
2635 ) override {
2636 outputs_[output_idx] = create_out(sizes, strides, options);
2637 if (!names.empty()) {
2638 namedinference::propagate_names(*outputs_[output_idx], names);
2639 }
2640 // super must happen after, so that downstream can use maybe_get_output
2641 // to retrieve the output
2642 at::native::structured_atan_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2643 }
2644 void set_output_raw_strided(
2645 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2646 TensorOptions options, DimnameList names
2647 ) override {
2648 outputs_[output_idx] = create_out(sizes, strides, options);
2649 if (!names.empty()) {
2650 namedinference::propagate_names(*outputs_[output_idx], names);
2651 }
2652 // super must happen after, so that downstream can use maybe_get_output
2653 // to retrieve the output
2654 at::native::structured_atan_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2655 }
2656 const Tensor& maybe_get_output(int64_t output_idx) override {
2657 return *outputs_[output_idx];
2658 }
2659 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
2660};
2661at::Tensor wrapper_CPU_atan(const at::Tensor & self) {
2662structured_atan_out_functional op;
2663op.meta(self);
2664op.impl(self, *op.outputs_[0]);
2665return std::move(op.outputs_[0]).take();
2666}
2667struct structured_atan_out_out final : public at::native::structured_atan_out {
2668 structured_atan_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
2669 void set_output_strided(
2670 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2671 TensorOptions options, DimnameList names
2672 ) override {
2673 const auto& out = outputs_[output_idx].get();
2674 resize_out(out, sizes, strides, options);
2675 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
2676 if (C10_UNLIKELY(maybe_proxy.has_value())) {
2677 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
2678 }
2679 if (!names.empty()) {
2680 namedinference::propagate_names(outputs_[output_idx], names);
2681 }
2682 // super must happen after, so that downstream can use maybe_get_output
2683 // to retrieve the output
2684 at::native::structured_atan_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2685 }
2686 void set_output_raw_strided(
2687 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2688 TensorOptions options, DimnameList names
2689 ) override {
2690 const auto& out = outputs_[output_idx].get();
2691 resize_out(out, sizes, strides, options);
2692 if (!names.empty()) {
2693 namedinference::propagate_names(outputs_[output_idx], names);
2694 }
2695 // super must happen after, so that downstream can use maybe_get_output
2696 // to retrieve the output
2697 at::native::structured_atan_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2698 }
2699 const Tensor& maybe_get_output(int64_t output_idx) override {
2700 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
2701 }
2702 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
2703 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
2704};
2705at::Tensor & wrapper_CPU_atan_out_out(const at::Tensor & self, at::Tensor & out) {
2706structured_atan_out_out op(out);
2707op.meta(self);
2708op.impl(self, op.maybe_get_output(0));
2709if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
2710return out;
2711}
2712struct structured_atan_out_inplace final : public at::native::structured_atan_out {
2713 structured_atan_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
2714 void set_output_strided(
2715 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2716 TensorOptions options, DimnameList names
2717 ) override {
2718 const auto& out = outputs_[output_idx].get();
2719 check_inplace(out, sizes, options);
2720 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
2721 if (C10_UNLIKELY(maybe_proxy.has_value())) {
2722 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
2723 }
2724 if (!names.empty()) {
2725 namedinference::propagate_names(outputs_[output_idx], names);
2726 }
2727 // super must happen after, so that downstream can use maybe_get_output
2728 // to retrieve the output
2729 at::native::structured_atan_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2730 }
2731 void set_output_raw_strided(
2732 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2733 TensorOptions options, DimnameList names
2734 ) override {
2735 const auto& out = outputs_[output_idx].get();
2736 check_inplace(out, sizes, options);
2737 if (!names.empty()) {
2738 namedinference::propagate_names(outputs_[output_idx], names);
2739 }
2740 // super must happen after, so that downstream can use maybe_get_output
2741 // to retrieve the output
2742 at::native::structured_atan_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2743 }
2744 const Tensor& maybe_get_output(int64_t output_idx) override {
2745 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
2746 }
2747 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
2748 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
2749};
2750at::Tensor & wrapper_CPU_atan_(at::Tensor & self) {
2751structured_atan_out_inplace op(self);
2752op.meta(self);
2753op.impl(self, op.outputs_[0]);
2754if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
2755return self;
2756}
2757struct structured_baddbmm_out_cpu_functional final : public at::native::structured_baddbmm_out_cpu {
2758 void set_output_strided(
2759 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2760 TensorOptions options, DimnameList names
2761 ) override {
2762 outputs_[output_idx] = create_out(sizes, strides, options);
2763 if (!names.empty()) {
2764 namedinference::propagate_names(*outputs_[output_idx], names);
2765 }
2766 // super must happen after, so that downstream can use maybe_get_output
2767 // to retrieve the output
2768 }
2769 void set_output_raw_strided(
2770 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2771 TensorOptions options, DimnameList names
2772 ) override {
2773 outputs_[output_idx] = create_out(sizes, strides, options);
2774 if (!names.empty()) {
2775 namedinference::propagate_names(*outputs_[output_idx], names);
2776 }
2777 // super must happen after, so that downstream can use maybe_get_output
2778 // to retrieve the output
2779 }
2780 const Tensor& maybe_get_output(int64_t output_idx) override {
2781 return *outputs_[output_idx];
2782 }
2783 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
2784};
2785at::Tensor wrapper_CPU_baddbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
2786structured_baddbmm_out_cpu_functional op;
2787op.meta(self, batch1, batch2, beta, alpha);
2788op.impl(self, batch1, batch2, beta, alpha, *op.outputs_[0]);
2789return std::move(op.outputs_[0]).take();
2790}
2791struct structured_baddbmm_out_cpu_out final : public at::native::structured_baddbmm_out_cpu {
2792 structured_baddbmm_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
2793 void set_output_strided(
2794 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2795 TensorOptions options, DimnameList names
2796 ) override {
2797 const auto& out = outputs_[output_idx].get();
2798 resize_out(out, sizes, strides, options);
2799 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
2800 if (C10_UNLIKELY(maybe_proxy.has_value())) {
2801 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
2802 }
2803 if (!names.empty()) {
2804 namedinference::propagate_names(outputs_[output_idx], names);
2805 }
2806 // super must happen after, so that downstream can use maybe_get_output
2807 // to retrieve the output
2808 }
2809 void set_output_raw_strided(
2810 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2811 TensorOptions options, DimnameList names
2812 ) override {
2813 const auto& out = outputs_[output_idx].get();
2814 resize_out(out, sizes, strides, options);
2815 if (!names.empty()) {
2816 namedinference::propagate_names(outputs_[output_idx], names);
2817 }
2818 // super must happen after, so that downstream can use maybe_get_output
2819 // to retrieve the output
2820 }
2821 const Tensor& maybe_get_output(int64_t output_idx) override {
2822 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
2823 }
2824 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
2825 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
2826};
2827at::Tensor & wrapper_CPU_baddbmm_out_out(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
2828structured_baddbmm_out_cpu_out op(out);
2829op.meta(self, batch1, batch2, beta, alpha);
2830op.impl(self, batch1, batch2, beta, alpha, op.maybe_get_output(0));
2831if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
2832return out;
2833}
2834struct structured_baddbmm_out_cpu_inplace final : public at::native::structured_baddbmm_out_cpu {
2835 structured_baddbmm_out_cpu_inplace(Tensor& self) : outputs_{std::ref(self)} {}
2836 void set_output_strided(
2837 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2838 TensorOptions options, DimnameList names
2839 ) override {
2840 const auto& out = outputs_[output_idx].get();
2841 check_inplace(out, sizes, options);
2842 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
2843 if (C10_UNLIKELY(maybe_proxy.has_value())) {
2844 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
2845 }
2846 if (!names.empty()) {
2847 namedinference::propagate_names(outputs_[output_idx], names);
2848 }
2849 // super must happen after, so that downstream can use maybe_get_output
2850 // to retrieve the output
2851 }
2852 void set_output_raw_strided(
2853 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2854 TensorOptions options, DimnameList names
2855 ) override {
2856 const auto& out = outputs_[output_idx].get();
2857 check_inplace(out, sizes, options);
2858 if (!names.empty()) {
2859 namedinference::propagate_names(outputs_[output_idx], names);
2860 }
2861 // super must happen after, so that downstream can use maybe_get_output
2862 // to retrieve the output
2863 }
2864 const Tensor& maybe_get_output(int64_t output_idx) override {
2865 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
2866 }
2867 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
2868 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
2869};
2870at::Tensor & wrapper_CPU_baddbmm_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
2871structured_baddbmm_out_cpu_inplace op(self);
2872op.meta(self, batch1, batch2, beta, alpha);
2873op.impl(self, batch1, batch2, beta, alpha, op.outputs_[0]);
2874if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
2875return self;
2876}
2877namespace {
2878at::Tensor & wrapper_CPU_out_bernoulli_out(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) {
2879 // No device check
2880 // DeviceGuard omitted
2881 return at::native::bernoulli_out(self, generator, out);
2882}
2883} // anonymous namespace
2884namespace {
2885at::Tensor & wrapper_CPU_Tensor_bernoulli_(at::Tensor & self, const at::Tensor & p, c10::optional<at::Generator> generator) {
2886 // No device check
2887 // DeviceGuard omitted
2888 return at::native::bernoulli_(self, p, generator);
2889}
2890} // anonymous namespace
2891namespace {
2892at::Tensor & wrapper_CPU_float_bernoulli_(at::Tensor & self, double p, c10::optional<at::Generator> generator) {
2893 // No device check
2894 // DeviceGuard omitted
2895 return at::native::bernoulli_(self, p, generator);
2896}
2897} // anonymous namespace
2898namespace {
2899at::Tensor wrapper_CPU__binary_cross_entropy(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction) {
2900 // No device check
2901 // DeviceGuard omitted
2902 return at::native::binary_cross_entropy_cpu(self, target, weight, reduction);
2903}
2904} // anonymous namespace
2905namespace {
2906at::Tensor & wrapper_CPU_out_binary_cross_entropy_out(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out) {
2907 // No device check
2908 // DeviceGuard omitted
2909 return at::native::binary_cross_entropy_out_cpu(self, target, weight, reduction, out);
2910}
2911} // anonymous namespace
2912namespace {
2913at::Tensor wrapper_CPU__binary_cross_entropy_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction) {
2914 // No device check
2915 // DeviceGuard omitted
2916 return at::native::binary_cross_entropy_backward_cpu(grad_output, self, target, weight, reduction);
2917}
2918} // anonymous namespace
2919namespace {
2920at::Tensor & wrapper_CPU_grad_input_binary_cross_entropy_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input) {
2921 // No device check
2922 // DeviceGuard omitted
2923 return at::native::binary_cross_entropy_backward_out_cpu(grad_output, self, target, weight, reduction, grad_input);
2924}
2925} // anonymous namespace
2926namespace {
2927at::Tensor wrapper_CPU__bincount(const at::Tensor & self, const c10::optional<at::Tensor> & weights, int64_t minlength) {
2928 // No device check
2929 // DeviceGuard omitted
2930 return at::native::_bincount_cpu(self, weights, minlength);
2931}
2932} // anonymous namespace
2933struct structured_bitwise_not_out_functional final : public at::native::structured_bitwise_not_out {
2934 void set_output_strided(
2935 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2936 TensorOptions options, DimnameList names
2937 ) override {
2938 outputs_[output_idx] = create_out(sizes, strides, options);
2939 if (!names.empty()) {
2940 namedinference::propagate_names(*outputs_[output_idx], names);
2941 }
2942 // super must happen after, so that downstream can use maybe_get_output
2943 // to retrieve the output
2944 at::native::structured_bitwise_not_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2945 }
2946 void set_output_raw_strided(
2947 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2948 TensorOptions options, DimnameList names
2949 ) override {
2950 outputs_[output_idx] = create_out(sizes, strides, options);
2951 if (!names.empty()) {
2952 namedinference::propagate_names(*outputs_[output_idx], names);
2953 }
2954 // super must happen after, so that downstream can use maybe_get_output
2955 // to retrieve the output
2956 at::native::structured_bitwise_not_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2957 }
2958 const Tensor& maybe_get_output(int64_t output_idx) override {
2959 return *outputs_[output_idx];
2960 }
2961 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
2962};
2963at::Tensor wrapper_CPU_bitwise_not(const at::Tensor & self) {
2964structured_bitwise_not_out_functional op;
2965op.meta(self);
2966op.impl(self, *op.outputs_[0]);
2967return std::move(op.outputs_[0]).take();
2968}
2969struct structured_bitwise_not_out_out final : public at::native::structured_bitwise_not_out {
2970 structured_bitwise_not_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
2971 void set_output_strided(
2972 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2973 TensorOptions options, DimnameList names
2974 ) override {
2975 const auto& out = outputs_[output_idx].get();
2976 resize_out(out, sizes, strides, options);
2977 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
2978 if (C10_UNLIKELY(maybe_proxy.has_value())) {
2979 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
2980 }
2981 if (!names.empty()) {
2982 namedinference::propagate_names(outputs_[output_idx], names);
2983 }
2984 // super must happen after, so that downstream can use maybe_get_output
2985 // to retrieve the output
2986 at::native::structured_bitwise_not_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
2987 }
2988 void set_output_raw_strided(
2989 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
2990 TensorOptions options, DimnameList names
2991 ) override {
2992 const auto& out = outputs_[output_idx].get();
2993 resize_out(out, sizes, strides, options);
2994 if (!names.empty()) {
2995 namedinference::propagate_names(outputs_[output_idx], names);
2996 }
2997 // super must happen after, so that downstream can use maybe_get_output
2998 // to retrieve the output
2999 at::native::structured_bitwise_not_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
3000 }
3001 const Tensor& maybe_get_output(int64_t output_idx) override {
3002 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
3003 }
3004 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
3005 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
3006};
3007at::Tensor & wrapper_CPU_bitwise_not_out_out(const at::Tensor & self, at::Tensor & out) {
3008structured_bitwise_not_out_out op(out);
3009op.meta(self);
3010op.impl(self, op.maybe_get_output(0));
3011if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
3012return out;
3013}
3014struct structured_bitwise_not_out_inplace final : public at::native::structured_bitwise_not_out {
3015 structured_bitwise_not_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
3016 void set_output_strided(
3017 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3018 TensorOptions options, DimnameList names
3019 ) override {
3020 const auto& out = outputs_[output_idx].get();
3021 check_inplace(out, sizes, options);
3022 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
3023 if (C10_UNLIKELY(maybe_proxy.has_value())) {
3024 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
3025 }
3026 if (!names.empty()) {
3027 namedinference::propagate_names(outputs_[output_idx], names);
3028 }
3029 // super must happen after, so that downstream can use maybe_get_output
3030 // to retrieve the output
3031 at::native::structured_bitwise_not_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
3032 }
3033 void set_output_raw_strided(
3034 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3035 TensorOptions options, DimnameList names
3036 ) override {
3037 const auto& out = outputs_[output_idx].get();
3038 check_inplace(out, sizes, options);
3039 if (!names.empty()) {
3040 namedinference::propagate_names(outputs_[output_idx], names);
3041 }
3042 // super must happen after, so that downstream can use maybe_get_output
3043 // to retrieve the output
3044 at::native::structured_bitwise_not_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
3045 }
3046 const Tensor& maybe_get_output(int64_t output_idx) override {
3047 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
3048 }
3049 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
3050 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
3051};
3052at::Tensor & wrapper_CPU_bitwise_not_(at::Tensor & self) {
3053structured_bitwise_not_out_inplace op(self);
3054op.meta(self);
3055op.impl(self, op.outputs_[0]);
3056if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
3057return self;
3058}
3059struct structured_copysign_out_functional final : public at::native::structured_copysign_out {
3060 void set_output_strided(
3061 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3062 TensorOptions options, DimnameList names
3063 ) override {
3064 outputs_[output_idx] = create_out(sizes, strides, options);
3065 if (!names.empty()) {
3066 namedinference::propagate_names(*outputs_[output_idx], names);
3067 }
3068 // super must happen after, so that downstream can use maybe_get_output
3069 // to retrieve the output
3070 at::native::structured_copysign_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
3071 }
3072 void set_output_raw_strided(
3073 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3074 TensorOptions options, DimnameList names
3075 ) override {
3076 outputs_[output_idx] = create_out(sizes, strides, options);
3077 if (!names.empty()) {
3078 namedinference::propagate_names(*outputs_[output_idx], names);
3079 }
3080 // super must happen after, so that downstream can use maybe_get_output
3081 // to retrieve the output
3082 at::native::structured_copysign_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
3083 }
3084 const Tensor& maybe_get_output(int64_t output_idx) override {
3085 return *outputs_[output_idx];
3086 }
3087 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
3088};
3089at::Tensor wrapper_CPU_copysign_Tensor(const at::Tensor & self, const at::Tensor & other) {
3090structured_copysign_out_functional op;
3091op.meta(self, other);
3092op.impl(self, other, *op.outputs_[0]);
3093return std::move(op.outputs_[0]).take();
3094}
3095struct structured_copysign_out_out final : public at::native::structured_copysign_out {
3096 structured_copysign_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
3097 void set_output_strided(
3098 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3099 TensorOptions options, DimnameList names
3100 ) override {
3101 const auto& out = outputs_[output_idx].get();
3102 resize_out(out, sizes, strides, options);
3103 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
3104 if (C10_UNLIKELY(maybe_proxy.has_value())) {
3105 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
3106 }
3107 if (!names.empty()) {
3108 namedinference::propagate_names(outputs_[output_idx], names);
3109 }
3110 // super must happen after, so that downstream can use maybe_get_output
3111 // to retrieve the output
3112 at::native::structured_copysign_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
3113 }
3114 void set_output_raw_strided(
3115 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3116 TensorOptions options, DimnameList names
3117 ) override {
3118 const auto& out = outputs_[output_idx].get();
3119 resize_out(out, sizes, strides, options);
3120 if (!names.empty()) {
3121 namedinference::propagate_names(outputs_[output_idx], names);
3122 }
3123 // super must happen after, so that downstream can use maybe_get_output
3124 // to retrieve the output
3125 at::native::structured_copysign_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
3126 }
3127 const Tensor& maybe_get_output(int64_t output_idx) override {
3128 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
3129 }
3130 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
3131 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
3132};
3133at::Tensor & wrapper_CPU_copysign_out_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
3134structured_copysign_out_out op(out);
3135op.meta(self, other);
3136op.impl(self, other, op.maybe_get_output(0));
3137if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
3138return out;
3139}
3140struct structured_copysign_out_inplace final : public at::native::structured_copysign_out {
3141 structured_copysign_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
3142 void set_output_strided(
3143 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3144 TensorOptions options, DimnameList names
3145 ) override {
3146 const auto& out = outputs_[output_idx].get();
3147 check_inplace(out, sizes, options);
3148 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
3149 if (C10_UNLIKELY(maybe_proxy.has_value())) {
3150 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
3151 }
3152 if (!names.empty()) {
3153 namedinference::propagate_names(outputs_[output_idx], names);
3154 }
3155 // super must happen after, so that downstream can use maybe_get_output
3156 // to retrieve the output
3157 at::native::structured_copysign_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
3158 }
3159 void set_output_raw_strided(
3160 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3161 TensorOptions options, DimnameList names
3162 ) override {
3163 const auto& out = outputs_[output_idx].get();
3164 check_inplace(out, sizes, options);
3165 if (!names.empty()) {
3166 namedinference::propagate_names(outputs_[output_idx], names);
3167 }
3168 // super must happen after, so that downstream can use maybe_get_output
3169 // to retrieve the output
3170 at::native::structured_copysign_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
3171 }
3172 const Tensor& maybe_get_output(int64_t output_idx) override {
3173 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
3174 }
3175 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
3176 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
3177};
3178at::Tensor & wrapper_CPU_copysign__Tensor(at::Tensor & self, const at::Tensor & other) {
3179structured_copysign_out_inplace op(self);
3180op.meta(self, other);
3181op.impl(self, other, op.outputs_[0]);
3182if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
3183return self;
3184}
3185namespace {
3186at::Tensor & wrapper_CPU_out_logical_not_out(const at::Tensor & self, at::Tensor & out) {
3187 // No device check
3188 // DeviceGuard omitted
3189 return at::native::logical_not_out(self, out);
3190}
3191} // anonymous namespace
3192namespace {
3193at::Tensor & wrapper_CPU_out_logical_xor_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
3194 // No device check
3195 // DeviceGuard omitted
3196 return at::native::logical_xor_out(self, other, out);
3197}
3198} // anonymous namespace
3199namespace {
3200at::Tensor & wrapper_CPU_out_logical_and_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
3201 // No device check
3202 // DeviceGuard omitted
3203 return at::native::logical_and_out(self, other, out);
3204}
3205} // anonymous namespace
3206namespace {
3207at::Tensor & wrapper_CPU_out_logical_or_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
3208 // No device check
3209 // DeviceGuard omitted
3210 return at::native::logical_or_out(self, other, out);
3211}
3212} // anonymous namespace
3213struct structured_bmm_out_cpu_functional final : public at::native::structured_bmm_out_cpu {
3214 void set_output_strided(
3215 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3216 TensorOptions options, DimnameList names
3217 ) override {
3218 outputs_[output_idx] = create_out(sizes, strides, options);
3219 if (!names.empty()) {
3220 namedinference::propagate_names(*outputs_[output_idx], names);
3221 }
3222 // super must happen after, so that downstream can use maybe_get_output
3223 // to retrieve the output
3224 }
3225 void set_output_raw_strided(
3226 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3227 TensorOptions options, DimnameList names
3228 ) override {
3229 outputs_[output_idx] = create_out(sizes, strides, options);
3230 if (!names.empty()) {
3231 namedinference::propagate_names(*outputs_[output_idx], names);
3232 }
3233 // super must happen after, so that downstream can use maybe_get_output
3234 // to retrieve the output
3235 }
3236 const Tensor& maybe_get_output(int64_t output_idx) override {
3237 return *outputs_[output_idx];
3238 }
3239 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
3240};
3241at::Tensor wrapper_CPU_bmm(const at::Tensor & self, const at::Tensor & mat2) {
3242structured_bmm_out_cpu_functional op;
3243op.meta(self, mat2);
3244op.impl(self, mat2, *op.outputs_[0]);
3245return std::move(op.outputs_[0]).take();
3246}
3247struct structured_bmm_out_cpu_out final : public at::native::structured_bmm_out_cpu {
3248 structured_bmm_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
3249 void set_output_strided(
3250 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3251 TensorOptions options, DimnameList names
3252 ) override {
3253 const auto& out = outputs_[output_idx].get();
3254 resize_out(out, sizes, strides, options);
3255 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
3256 if (C10_UNLIKELY(maybe_proxy.has_value())) {
3257 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
3258 }
3259 if (!names.empty()) {
3260 namedinference::propagate_names(outputs_[output_idx], names);
3261 }
3262 // super must happen after, so that downstream can use maybe_get_output
3263 // to retrieve the output
3264 }
3265 void set_output_raw_strided(
3266 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3267 TensorOptions options, DimnameList names
3268 ) override {
3269 const auto& out = outputs_[output_idx].get();
3270 resize_out(out, sizes, strides, options);
3271 if (!names.empty()) {
3272 namedinference::propagate_names(outputs_[output_idx], names);
3273 }
3274 // super must happen after, so that downstream can use maybe_get_output
3275 // to retrieve the output
3276 }
3277 const Tensor& maybe_get_output(int64_t output_idx) override {
3278 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
3279 }
3280 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
3281 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
3282};
3283at::Tensor & wrapper_CPU_bmm_out_out(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
3284structured_bmm_out_cpu_out op(out);
3285op.meta(self, mat2);
3286op.impl(self, mat2, op.maybe_get_output(0));
3287if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
3288return out;
3289}
3290struct structured_cat_out_cpu_functional final : public at::native::structured_cat_out_cpu {
3291 void set_output_strided(
3292 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3293 TensorOptions options, DimnameList names
3294 ) override {
3295 outputs_[output_idx] = create_out(sizes, strides, options);
3296 if (!names.empty()) {
3297 namedinference::propagate_names(*outputs_[output_idx], names);
3298 }
3299 // super must happen after, so that downstream can use maybe_get_output
3300 // to retrieve the output
3301 }
3302 void set_output_raw_strided(
3303 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3304 TensorOptions options, DimnameList names
3305 ) override {
3306 outputs_[output_idx] = create_out(sizes, strides, options);
3307 if (!names.empty()) {
3308 namedinference::propagate_names(*outputs_[output_idx], names);
3309 }
3310 // super must happen after, so that downstream can use maybe_get_output
3311 // to retrieve the output
3312 }
3313 const Tensor& maybe_get_output(int64_t output_idx) override {
3314 return *outputs_[output_idx];
3315 }
3316 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
3317};
3318at::Tensor wrapper_CPU_cat(const at::ITensorListRef & tensors, int64_t dim) {
3319structured_cat_out_cpu_functional op;
3320auto precompute = op.meta(tensors, dim);
3321(void)precompute;
3322op.impl(tensors, precompute.dim, precompute.valid, precompute.all_contiguous, precompute.all_same_dtype, precompute.all_same_sizes_and_stride, precompute.memory_format, *op.outputs_[0]);
3323return std::move(op.outputs_[0]).take();
3324}
3325struct structured_cat_out_cpu_out final : public at::native::structured_cat_out_cpu {
3326 structured_cat_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
3327 void set_output_strided(
3328 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3329 TensorOptions options, DimnameList names
3330 ) override {
3331 const auto& out = outputs_[output_idx].get();
3332 resize_out(out, sizes, strides, options);
3333 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
3334 if (C10_UNLIKELY(maybe_proxy.has_value())) {
3335 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
3336 }
3337 if (!names.empty()) {
3338 namedinference::propagate_names(outputs_[output_idx], names);
3339 }
3340 // super must happen after, so that downstream can use maybe_get_output
3341 // to retrieve the output
3342 }
3343 void set_output_raw_strided(
3344 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3345 TensorOptions options, DimnameList names
3346 ) override {
3347 const auto& out = outputs_[output_idx].get();
3348 resize_out(out, sizes, strides, options);
3349 if (!names.empty()) {
3350 namedinference::propagate_names(outputs_[output_idx], names);
3351 }
3352 // super must happen after, so that downstream can use maybe_get_output
3353 // to retrieve the output
3354 }
3355 const Tensor& maybe_get_output(int64_t output_idx) override {
3356 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
3357 }
3358 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
3359 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
3360};
3361at::Tensor & wrapper_CPU_cat_out_out(const at::ITensorListRef & tensors, int64_t dim, at::Tensor & out) {
3362structured_cat_out_cpu_out op(out);
3363auto precompute = op.meta(tensors, dim);
3364(void)precompute;
3365op.impl(tensors, precompute.dim, precompute.valid, precompute.all_contiguous, precompute.all_same_dtype, precompute.all_same_sizes_and_stride, precompute.memory_format, op.maybe_get_output(0));
3366if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
3367return out;
3368}
3369struct structured_ceil_out_functional final : public at::native::structured_ceil_out {
3370 void set_output_strided(
3371 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3372 TensorOptions options, DimnameList names
3373 ) override {
3374 outputs_[output_idx] = create_out(sizes, strides, options);
3375 if (!names.empty()) {
3376 namedinference::propagate_names(*outputs_[output_idx], names);
3377 }
3378 // super must happen after, so that downstream can use maybe_get_output
3379 // to retrieve the output
3380 at::native::structured_ceil_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
3381 }
3382 void set_output_raw_strided(
3383 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3384 TensorOptions options, DimnameList names
3385 ) override {
3386 outputs_[output_idx] = create_out(sizes, strides, options);
3387 if (!names.empty()) {
3388 namedinference::propagate_names(*outputs_[output_idx], names);
3389 }
3390 // super must happen after, so that downstream can use maybe_get_output
3391 // to retrieve the output
3392 at::native::structured_ceil_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
3393 }
3394 const Tensor& maybe_get_output(int64_t output_idx) override {
3395 return *outputs_[output_idx];
3396 }
3397 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
3398};
3399at::Tensor wrapper_CPU_ceil(const at::Tensor & self) {
3400structured_ceil_out_functional op;
3401op.meta(self);
3402op.impl(self, *op.outputs_[0]);
3403return std::move(op.outputs_[0]).take();
3404}
3405struct structured_ceil_out_out final : public at::native::structured_ceil_out {
3406 structured_ceil_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
3407 void set_output_strided(
3408 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3409 TensorOptions options, DimnameList names
3410 ) override {
3411 const auto& out = outputs_[output_idx].get();
3412 resize_out(out, sizes, strides, options);
3413 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
3414 if (C10_UNLIKELY(maybe_proxy.has_value())) {
3415 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
3416 }
3417 if (!names.empty()) {
3418 namedinference::propagate_names(outputs_[output_idx], names);
3419 }
3420 // super must happen after, so that downstream can use maybe_get_output
3421 // to retrieve the output
3422 at::native::structured_ceil_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
3423 }
3424 void set_output_raw_strided(
3425 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3426 TensorOptions options, DimnameList names
3427 ) override {
3428 const auto& out = outputs_[output_idx].get();
3429 resize_out(out, sizes, strides, options);
3430 if (!names.empty()) {
3431 namedinference::propagate_names(outputs_[output_idx], names);
3432 }
3433 // super must happen after, so that downstream can use maybe_get_output
3434 // to retrieve the output
3435 at::native::structured_ceil_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
3436 }
3437 const Tensor& maybe_get_output(int64_t output_idx) override {
3438 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
3439 }
3440 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
3441 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
3442};
3443at::Tensor & wrapper_CPU_ceil_out_out(const at::Tensor & self, at::Tensor & out) {
3444structured_ceil_out_out op(out);
3445op.meta(self);
3446op.impl(self, op.maybe_get_output(0));
3447if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
3448return out;
3449}
3450struct structured_ceil_out_inplace final : public at::native::structured_ceil_out {
3451 structured_ceil_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
3452 void set_output_strided(
3453 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3454 TensorOptions options, DimnameList names
3455 ) override {
3456 const auto& out = outputs_[output_idx].get();
3457 check_inplace(out, sizes, options);
3458 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
3459 if (C10_UNLIKELY(maybe_proxy.has_value())) {
3460 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
3461 }
3462 if (!names.empty()) {
3463 namedinference::propagate_names(outputs_[output_idx], names);
3464 }
3465 // super must happen after, so that downstream can use maybe_get_output
3466 // to retrieve the output
3467 at::native::structured_ceil_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
3468 }
3469 void set_output_raw_strided(
3470 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3471 TensorOptions options, DimnameList names
3472 ) override {
3473 const auto& out = outputs_[output_idx].get();
3474 check_inplace(out, sizes, options);
3475 if (!names.empty()) {
3476 namedinference::propagate_names(outputs_[output_idx], names);
3477 }
3478 // super must happen after, so that downstream can use maybe_get_output
3479 // to retrieve the output
3480 at::native::structured_ceil_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
3481 }
3482 const Tensor& maybe_get_output(int64_t output_idx) override {
3483 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
3484 }
3485 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
3486 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
3487};
3488at::Tensor & wrapper_CPU_ceil_(at::Tensor & self) {
3489structured_ceil_out_inplace op(self);
3490op.meta(self);
3491op.impl(self, op.outputs_[0]);
3492if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
3493return self;
3494}
3495struct structured_clamp_out_functional final : public at::native::structured_clamp_out {
3496 void set_output_strided(
3497 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3498 TensorOptions options, DimnameList names
3499 ) override {
3500 outputs_[output_idx] = create_out(sizes, strides, options);
3501 if (!names.empty()) {
3502 namedinference::propagate_names(*outputs_[output_idx], names);
3503 }
3504 // super must happen after, so that downstream can use maybe_get_output
3505 // to retrieve the output
3506 at::native::structured_clamp_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
3507 }
3508 void set_output_raw_strided(
3509 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3510 TensorOptions options, DimnameList names
3511 ) override {
3512 outputs_[output_idx] = create_out(sizes, strides, options);
3513 if (!names.empty()) {
3514 namedinference::propagate_names(*outputs_[output_idx], names);
3515 }
3516 // super must happen after, so that downstream can use maybe_get_output
3517 // to retrieve the output
3518 at::native::structured_clamp_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
3519 }
3520 const Tensor& maybe_get_output(int64_t output_idx) override {
3521 return *outputs_[output_idx];
3522 }
3523 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
3524};
3525at::Tensor wrapper_CPU_clamp(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
3526structured_clamp_out_functional op;
3527op.meta(self, (min.has_value() ? at::OptionalScalarRef(&(min.value())) : at::OptionalScalarRef()), (max.has_value() ? at::OptionalScalarRef(&(max.value())) : at::OptionalScalarRef()));
3528op.impl(self, (min.has_value() ? at::OptionalScalarRef(&(min.value())) : at::OptionalScalarRef()), (max.has_value() ? at::OptionalScalarRef(&(max.value())) : at::OptionalScalarRef()), *op.outputs_[0]);
3529return std::move(op.outputs_[0]).take();
3530}
3531struct structured_clamp_out_out final : public at::native::structured_clamp_out {
3532 structured_clamp_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
3533 void set_output_strided(
3534 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3535 TensorOptions options, DimnameList names
3536 ) override {
3537 const auto& out = outputs_[output_idx].get();
3538 resize_out(out, sizes, strides, options);
3539 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
3540 if (C10_UNLIKELY(maybe_proxy.has_value())) {
3541 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
3542 }
3543 if (!names.empty()) {
3544 namedinference::propagate_names(outputs_[output_idx], names);
3545 }
3546 // super must happen after, so that downstream can use maybe_get_output
3547 // to retrieve the output
3548 at::native::structured_clamp_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
3549 }
3550 void set_output_raw_strided(
3551 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
3552 TensorOptions options, DimnameList names
3553 ) override {
3554 const auto& out = outputs_[output_idx].get();
3555 resize_out(out, sizes, strides, options);
3556 if (!names.empty()) {
3557 namedinference::propagate_names(outputs_[output_idx], names);
3558 }
3559 // super must happen after, so that downstream can use maybe_get_output
3560 // to retrieve the output
3561 at::native::structured_clamp_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
3562 }
3563 const Tensor& maybe_get_output(int64_t output_idx) override {
3564 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
3565 }
3566 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
3567 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
3568};
at::Tensor & wrapper_CPU_clamp_out_out(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max, at::Tensor & out) {
structured_clamp_out_out op(out);
op.meta(self, (min.has_value() ? at::OptionalScalarRef(&(min.value())) : at::OptionalScalarRef()), (max.has_value() ? at::OptionalScalarRef(&(max.value())) : at::OptionalScalarRef()));
op.impl(self, (min.has_value() ? at::OptionalScalarRef(&(min.value())) : at::OptionalScalarRef()), (max.has_value() ? at::OptionalScalarRef(&(max.value())) : at::OptionalScalarRef()), op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return out;
}
struct structured_clamp_out_inplace final : public at::native::structured_clamp_out {
    structured_clamp_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      check_inplace(out, sizes, options);
      auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
      if (C10_UNLIKELY(maybe_proxy.has_value())) {
        proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
      }
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      check_inplace(out, sizes, options);
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
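// In-place variant: check_inplace only verifies that `self` already has the
// expected sizes and options (nothing is resized); the kernel writes directly
// into `self`, falling back to the same proxy copy-back as above if needed.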
at::Tensor & wrapper_CPU_clamp_(at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
structured_clamp_out_inplace op(self);
op.meta(self, (min.has_value() ? at::OptionalScalarRef(&(min.value())) : at::OptionalScalarRef()), (max.has_value() ? at::OptionalScalarRef(&(max.value())) : at::OptionalScalarRef()));
op.impl(self, (min.has_value() ? at::OptionalScalarRef(&(min.value())) : at::OptionalScalarRef()), (max.has_value() ? at::OptionalScalarRef(&(max.value())) : at::OptionalScalarRef()), op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
struct structured_clamp_Tensor_out_functional final : public at::native::structured_clamp_Tensor_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      outputs_[output_idx] = create_out(sizes, strides, options);
      if (!names.empty()) {
        namedinference::propagate_names(*outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      outputs_[output_idx] = create_out(sizes, strides, options);
      if (!names.empty()) {
        namedinference::propagate_names(*outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return *outputs_[output_idx];
    }
    std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
};
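// Tensor-overload wrapper: the optional Tensor bounds are forwarded as
// at::OptionalTensorRef; an absent or undefined tensor becomes an empty ref.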
at::Tensor wrapper_CPU_clamp_Tensor(const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
structured_clamp_Tensor_out_functional op;
op.meta(self, ((min.has_value() && (*min).defined()) ? at::OptionalTensorRef(*min) : at::OptionalTensorRef()), ((max.has_value() && (*max).defined()) ? at::OptionalTensorRef(*max) : at::OptionalTensorRef()));
op.impl(self, ((min.has_value() && (*min).defined()) ? at::OptionalTensorRef(*min) : at::OptionalTensorRef()), ((max.has_value() && (*max).defined()) ? at::OptionalTensorRef(*max) : at::OptionalTensorRef()), *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_clamp_Tensor_out_out final : public at::native::structured_clamp_Tensor_out {
    structured_clamp_Tensor_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      resize_out(out, sizes, strides, options);
      auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
      if (C10_UNLIKELY(maybe_proxy.has_value())) {
        proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
      }
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      resize_out(out, sizes, strides, options);
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_clamp_out_Tensor_out(const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max, at::Tensor & out) {
structured_clamp_Tensor_out_out op(out);
op.meta(self, ((min.has_value() && (*min).defined()) ? at::OptionalTensorRef(*min) : at::OptionalTensorRef()), ((max.has_value() && (*max).defined()) ? at::OptionalTensorRef(*max) : at::OptionalTensorRef()));
op.impl(self, ((min.has_value() && (*min).defined()) ? at::OptionalTensorRef(*min) : at::OptionalTensorRef()), ((max.has_value() && (*max).defined()) ? at::OptionalTensorRef(*max) : at::OptionalTensorRef()), op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return out;
}
struct structured_clamp_Tensor_out_inplace final : public at::native::structured_clamp_Tensor_out {
    structured_clamp_Tensor_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      check_inplace(out, sizes, options);
      auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
      if (C10_UNLIKELY(maybe_proxy.has_value())) {
        proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
      }
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      check_inplace(out, sizes, options);
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_clamp__Tensor(at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
structured_clamp_Tensor_out_inplace op(self);
op.meta(self, ((min.has_value() && (*min).defined()) ? at::OptionalTensorRef(*min) : at::OptionalTensorRef()), ((max.has_value() && (*max).defined()) ? at::OptionalTensorRef(*max) : at::OptionalTensorRef()));
op.impl(self, ((min.has_value() && (*min).defined()) ? at::OptionalTensorRef(*min) : at::OptionalTensorRef()), ((max.has_value() && (*max).defined()) ? at::OptionalTensorRef(*max) : at::OptionalTensorRef()), op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
struct structured_clamp_max_out_functional final : public at::native::structured_clamp_max_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      outputs_[output_idx] = create_out(sizes, strides, options);
      if (!names.empty()) {
        namedinference::propagate_names(*outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_max_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      outputs_[output_idx] = create_out(sizes, strides, options);
      if (!names.empty()) {
        namedinference::propagate_names(*outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_max_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return *outputs_[output_idx];
    }
    std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
};
at::Tensor wrapper_CPU_clamp_max(const at::Tensor & self, const at::Scalar & max) {
structured_clamp_max_out_functional op;
op.meta(self, max);
op.impl(self, max, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_clamp_max_out_out final : public at::native::structured_clamp_max_out {
    structured_clamp_max_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      resize_out(out, sizes, strides, options);
      auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
      if (C10_UNLIKELY(maybe_proxy.has_value())) {
        proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
      }
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_max_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      resize_out(out, sizes, strides, options);
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_max_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_clamp_max_out_out(const at::Tensor & self, const at::Scalar & max, at::Tensor & out) {
structured_clamp_max_out_out op(out);
op.meta(self, max);
op.impl(self, max, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return out;
}
struct structured_clamp_max_out_inplace final : public at::native::structured_clamp_max_out {
    structured_clamp_max_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      check_inplace(out, sizes, options);
      auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
      if (C10_UNLIKELY(maybe_proxy.has_value())) {
        proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
      }
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_max_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      check_inplace(out, sizes, options);
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_max_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_clamp_max_(at::Tensor & self, const at::Scalar & max) {
structured_clamp_max_out_inplace op(self);
op.meta(self, max);
op.impl(self, max, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
struct structured_clamp_max_Tensor_out_functional final : public at::native::structured_clamp_max_Tensor_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      outputs_[output_idx] = create_out(sizes, strides, options);
      if (!names.empty()) {
        namedinference::propagate_names(*outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_max_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      outputs_[output_idx] = create_out(sizes, strides, options);
      if (!names.empty()) {
        namedinference::propagate_names(*outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_max_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return *outputs_[output_idx];
    }
    std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
};
at::Tensor wrapper_CPU_clamp_max_Tensor(const at::Tensor & self, const at::Tensor & max) {
structured_clamp_max_Tensor_out_functional op;
op.meta(self, max);
op.impl(self, max, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_clamp_max_Tensor_out_out final : public at::native::structured_clamp_max_Tensor_out {
    structured_clamp_max_Tensor_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      resize_out(out, sizes, strides, options);
      auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
      if (C10_UNLIKELY(maybe_proxy.has_value())) {
        proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
      }
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_max_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      resize_out(out, sizes, strides, options);
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_max_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_clamp_max_out_Tensor_out(const at::Tensor & self, const at::Tensor & max, at::Tensor & out) {
structured_clamp_max_Tensor_out_out op(out);
op.meta(self, max);
op.impl(self, max, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return out;
}
struct structured_clamp_max_Tensor_out_inplace final : public at::native::structured_clamp_max_Tensor_out {
    structured_clamp_max_Tensor_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      check_inplace(out, sizes, options);
      auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
      if (C10_UNLIKELY(maybe_proxy.has_value())) {
        proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
      }
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_max_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      check_inplace(out, sizes, options);
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_max_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_clamp_max__Tensor(at::Tensor & self, const at::Tensor & max) {
structured_clamp_max_Tensor_out_inplace op(self);
op.meta(self, max);
op.impl(self, max, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
struct structured_clamp_min_out_functional final : public at::native::structured_clamp_min_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      outputs_[output_idx] = create_out(sizes, strides, options);
      if (!names.empty()) {
        namedinference::propagate_names(*outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_min_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      outputs_[output_idx] = create_out(sizes, strides, options);
      if (!names.empty()) {
        namedinference::propagate_names(*outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_min_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return *outputs_[output_idx];
    }
    std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
};
at::Tensor wrapper_CPU_clamp_min(const at::Tensor & self, const at::Scalar & min) {
structured_clamp_min_out_functional op;
op.meta(self, min);
op.impl(self, min, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_clamp_min_out_out final : public at::native::structured_clamp_min_out {
    structured_clamp_min_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      resize_out(out, sizes, strides, options);
      auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
      if (C10_UNLIKELY(maybe_proxy.has_value())) {
        proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
      }
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_min_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      resize_out(out, sizes, strides, options);
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_min_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_clamp_min_out_out(const at::Tensor & self, const at::Scalar & min, at::Tensor & out) {
structured_clamp_min_out_out op(out);
op.meta(self, min);
op.impl(self, min, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return out;
}
struct structured_clamp_min_out_inplace final : public at::native::structured_clamp_min_out {
    structured_clamp_min_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      check_inplace(out, sizes, options);
      auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
      if (C10_UNLIKELY(maybe_proxy.has_value())) {
        proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
      }
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_min_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      check_inplace(out, sizes, options);
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_min_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_clamp_min_(at::Tensor & self, const at::Scalar & min) {
structured_clamp_min_out_inplace op(self);
op.meta(self, min);
op.impl(self, min, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
struct structured_clamp_min_Tensor_out_functional final : public at::native::structured_clamp_min_Tensor_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      outputs_[output_idx] = create_out(sizes, strides, options);
      if (!names.empty()) {
        namedinference::propagate_names(*outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_min_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      outputs_[output_idx] = create_out(sizes, strides, options);
      if (!names.empty()) {
        namedinference::propagate_names(*outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_min_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return *outputs_[output_idx];
    }
    std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
};
at::Tensor wrapper_CPU_clamp_min_Tensor(const at::Tensor & self, const at::Tensor & min) {
structured_clamp_min_Tensor_out_functional op;
op.meta(self, min);
op.impl(self, min, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_clamp_min_Tensor_out_out final : public at::native::structured_clamp_min_Tensor_out {
    structured_clamp_min_Tensor_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      resize_out(out, sizes, strides, options);
      auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
      if (C10_UNLIKELY(maybe_proxy.has_value())) {
        proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
      }
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_min_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      resize_out(out, sizes, strides, options);
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_min_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_clamp_min_out_Tensor_out(const at::Tensor & self, const at::Tensor & min, at::Tensor & out) {
structured_clamp_min_Tensor_out_out op(out);
op.meta(self, min);
op.impl(self, min, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return out;
}
struct structured_clamp_min_Tensor_out_inplace final : public at::native::structured_clamp_min_Tensor_out {
    structured_clamp_min_Tensor_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      check_inplace(out, sizes, options);
      auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
      if (C10_UNLIKELY(maybe_proxy.has_value())) {
        proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
      }
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_min_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      check_inplace(out, sizes, options);
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_clamp_min_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_clamp_min__Tensor(at::Tensor & self, const at::Tensor & min) {
structured_clamp_min_Tensor_out_inplace op(self);
op.meta(self, min);
op.impl(self, min, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
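// complex_out and polar_out are not structured kernels; their CPU registrations
// are plain wrappers in anonymous namespaces that forward directly to the
// corresponding at::native implementations.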
namespace {
at::Tensor & wrapper_CPU_out_complex_out(const at::Tensor & real, const at::Tensor & imag, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::complex_out(real, imag, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_out_polar_out(const at::Tensor & abs, const at::Tensor & angle, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::polar_out(abs, angle, out);
}
} // anonymous namespace
struct structured_cos_out_functional final : public at::native::structured_cos_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      outputs_[output_idx] = create_out(sizes, strides, options);
      if (!names.empty()) {
        namedinference::propagate_names(*outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_cos_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      outputs_[output_idx] = create_out(sizes, strides, options);
      if (!names.empty()) {
        namedinference::propagate_names(*outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_cos_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return *outputs_[output_idx];
    }
    std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
};
at::Tensor wrapper_CPU_cos(const at::Tensor & self) {
structured_cos_out_functional op;
op.meta(self);
op.impl(self, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_cos_out_out final : public at::native::structured_cos_out {
    structured_cos_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      resize_out(out, sizes, strides, options);
      auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
      if (C10_UNLIKELY(maybe_proxy.has_value())) {
        proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
      }
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_cos_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      resize_out(out, sizes, strides, options);
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_cos_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_cos_out_out(const at::Tensor & self, at::Tensor & out) {
structured_cos_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return out;
}
struct structured_cos_out_inplace final : public at::native::structured_cos_out {
    structured_cos_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      check_inplace(out, sizes, options);
      auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
      if (C10_UNLIKELY(maybe_proxy.has_value())) {
        proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
      }
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_cos_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      check_inplace(out, sizes, options);
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_cos_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_cos_(at::Tensor & self) {
structured_cos_out_inplace op(self);
op.meta(self);
op.impl(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
struct structured_cosh_out_functional final : public at::native::structured_cosh_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      outputs_[output_idx] = create_out(sizes, strides, options);
      if (!names.empty()) {
        namedinference::propagate_names(*outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_cosh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      outputs_[output_idx] = create_out(sizes, strides, options);
      if (!names.empty()) {
        namedinference::propagate_names(*outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_cosh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return *outputs_[output_idx];
    }
    std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
};
at::Tensor wrapper_CPU_cosh(const at::Tensor & self) {
structured_cosh_out_functional op;
op.meta(self);
op.impl(self, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_cosh_out_out final : public at::native::structured_cosh_out {
    structured_cosh_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      resize_out(out, sizes, strides, options);
      auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
      if (C10_UNLIKELY(maybe_proxy.has_value())) {
        proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
      }
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_cosh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      resize_out(out, sizes, strides, options);
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_cosh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_cosh_out_out(const at::Tensor & self, at::Tensor & out) {
structured_cosh_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return out;
}
struct structured_cosh_out_inplace final : public at::native::structured_cosh_out {
    structured_cosh_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      check_inplace(out, sizes, options);
      auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
      if (C10_UNLIKELY(maybe_proxy.has_value())) {
        proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
      }
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_cosh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      check_inplace(out, sizes, options);
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
      at::native::structured_cosh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_cosh_(at::Tensor & self) {
structured_cosh_out_inplace op(self);
op.meta(self);
op.impl(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
namespace {
at::Tensor wrapper_CPU_dim_IntList_count_nonzero(const at::Tensor & self, at::IntArrayRef dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::count_nonzero_cpu(self, dim);
}
} // anonymous namespace
namespace {
void wrapper_CPU___cummax_helper(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::cummax_helper_cpu(self, values, indices, dim);
}
} // anonymous namespace
namespace {
void wrapper_CPU___cummin_helper(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::cummin_helper_cpu(self, values, indices, dim);
}
} // anonymous namespace
struct structured_cumprod_out_functional final : public at::native::structured_cumprod_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      outputs_[output_idx] = create_out(sizes, strides, options);
      if (!names.empty()) {
        namedinference::propagate_names(*outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      outputs_[output_idx] = create_out(sizes, strides, options);
      if (!names.empty()) {
        namedinference::propagate_names(*outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return *outputs_[output_idx];
    }
    std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
};
at::Tensor wrapper_CPU_cumprod(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
structured_cumprod_out_functional op;
op.meta(self, dim, dtype);
op.impl(self, dim, dtype, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_cumprod_out_out final : public at::native::structured_cumprod_out {
    structured_cumprod_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      resize_out(out, sizes, strides, options);
      auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
      if (C10_UNLIKELY(maybe_proxy.has_value())) {
        proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
      }
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      resize_out(out, sizes, strides, options);
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_cumprod_out_out(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
structured_cumprod_out_out op(out);
op.meta(self, dim, dtype);
op.impl(self, dim, dtype, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return out;
}
struct structured_cumprod_out_inplace final : public at::native::structured_cumprod_out {
    structured_cumprod_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      check_inplace(out, sizes, options);
      auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
      if (C10_UNLIKELY(maybe_proxy.has_value())) {
        proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
      }
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      const auto& out = outputs_[output_idx].get();
      check_inplace(out, sizes, options);
      if (!names.empty()) {
        namedinference::propagate_names(outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_cumprod_(at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
structured_cumprod_out_inplace op(self);
op.meta(self, dim, dtype);
op.impl(self, dim, dtype, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
struct structured_cumsum_out_functional final : public at::native::structured_cumsum_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      outputs_[output_idx] = create_out(sizes, strides, options);
      if (!names.empty()) {
        namedinference::propagate_names(*outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
      outputs_[output_idx] = create_out(sizes, strides, options);
      if (!names.empty()) {
        namedinference::propagate_names(*outputs_[output_idx], names);
      }
      // super must happen after, so that downstream can use maybe_get_output
      // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return *outputs_[output_idx];
    }
    std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
};
at::Tensor wrapper_CPU_cumsum(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
structured_cumsum_out_functional op;
op.meta(self, dim, dtype);
op.impl(self, dim, dtype, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
4692struct structured_cumsum_out_out final : public at::native::structured_cumsum_out {
4693 structured_cumsum_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
4694 void set_output_strided(
4695 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4696 TensorOptions options, DimnameList names
4697 ) override {
4698 const auto& out = outputs_[output_idx].get();
4699 resize_out(out, sizes, strides, options);
4700 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
4701 if (C10_UNLIKELY(maybe_proxy.has_value())) {
4702 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
4703 }
4704 if (!names.empty()) {
4705 namedinference::propagate_names(outputs_[output_idx], names);
4706 }
4707 // super must happen after, so that downstream can use maybe_get_output
4708 // to retrieve the output
4709 }
4710 void set_output_raw_strided(
4711 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4712 TensorOptions options, DimnameList names
4713 ) override {
4714 const auto& out = outputs_[output_idx].get();
4715 resize_out(out, sizes, strides, options);
4716 if (!names.empty()) {
4717 namedinference::propagate_names(outputs_[output_idx], names);
4718 }
4719 // super must happen after, so that downstream can use maybe_get_output
4720 // to retrieve the output
4721 }
4722 const Tensor& maybe_get_output(int64_t output_idx) override {
4723 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
4724 }
4725 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
4726 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
4727};
4728at::Tensor & wrapper_CPU_cumsum_out_out(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
4729structured_cumsum_out_out op(out);
4730op.meta(self, dim, dtype);
4731op.impl(self, dim, dtype, op.maybe_get_output(0));
4732if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
4733return out;
4734}
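// NOTE on proxy_outputs_: in the *_out and *_inplace classes, maybe_create_proxy()
// can decide that the caller-supplied tensor should not be written to directly;
// impl() then writes into a freshly allocated proxy and the wrapper copies the
// result back afterwards, as in the line above:
//
//   if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
//
// A hedged usage sketch for the .out overload (CPU build assumed; the exact
// conditions for creating a proxy live in ATen/native/Resize.h):
//
//   at::Tensor src = at::randn({4});
//   at::Tensor out = at::empty({0});      // wrong size; resize_out fixes it up
//   at::cumsum_out(out, src, /*dim=*/0);  // reaches wrapper_CPU_cumsum_out_out on CPU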
4735struct structured_cumsum_out_inplace final : public at::native::structured_cumsum_out {
4736 structured_cumsum_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
4737 void set_output_strided(
4738 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4739 TensorOptions options, DimnameList names
4740 ) override {
4741 const auto& out = outputs_[output_idx].get();
4742 check_inplace(out, sizes, options);
4743 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
4744 if (C10_UNLIKELY(maybe_proxy.has_value())) {
4745 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
4746 }
4747 if (!names.empty()) {
4748 namedinference::propagate_names(outputs_[output_idx], names);
4749 }
4750 // super must happen after, so that downstream can use maybe_get_output
4751 // to retrieve the output
4752 }
4753 void set_output_raw_strided(
4754 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4755 TensorOptions options, DimnameList names
4756 ) override {
4757 const auto& out = outputs_[output_idx].get();
4758 check_inplace(out, sizes, options);
4759 if (!names.empty()) {
4760 namedinference::propagate_names(outputs_[output_idx], names);
4761 }
4762 // super must happen after, so that downstream can use maybe_get_output
4763 // to retrieve the output
4764 }
4765 const Tensor& maybe_get_output(int64_t output_idx) override {
4766 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
4767 }
4768 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
4769 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
4770};
4771at::Tensor & wrapper_CPU_cumsum_(at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
4772structured_cumsum_out_inplace op(self);
4773op.meta(self, dim, dtype);
4774op.impl(self, dim, dtype, op.outputs_[0]);
4775if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
4776return self;
4777}
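// The wrappers inside the anonymous namespaces below belong to operators that do
// not use the structured-kernel machinery; they forward directly to handwritten
// at::native::* CPU implementations, with device checks and DeviceGuard omitted
// as the generated comments note. Presumably each wrapper is registered with the
// dispatcher further down in this file (not shown here) along these lines:
//
//   TORCH_LIBRARY_IMPL(aten, CPU, m) {
//     m.impl("_ctc_loss", TORCH_FN(wrapper_CPU___ctc_loss));
//     // ...
//   }
//
// Treat that snippet as a sketch of the usual registration pattern rather than
// the exact emitted code.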
4778namespace {
4779::std::tuple<at::Tensor,at::Tensor> wrapper_CPU___ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool zero_infinity) {
4780 // No device check
4781 // DeviceGuard omitted
4782 return at::native::ctc_loss_cpu(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
4783}
4784} // anonymous namespace
4785namespace {
4786::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_Tensor__ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool zero_infinity) {
4787 // No device check
4788 // DeviceGuard omitted
4789 return at::native::ctc_loss_tensor(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
4790}
4791} // anonymous namespace
4792namespace {
4793at::Tensor wrapper_CPU___ctc_loss_backward(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity) {
4794 // No device check
4795 // DeviceGuard omitted
4796 return at::native::ctc_loss_backward_cpu(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity);
4797}
4798} // anonymous namespace
4799namespace {
4800at::Tensor wrapper_CPU_Tensor__ctc_loss_backward(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity) {
4801 // No device check
4802 // DeviceGuard omitted
4803 return at::native::ctc_loss_backward_tensor(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity);
4804}
4805} // anonymous namespace
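// NOTE: unlike the cumsum/cumprod classes above, the structured classes from here
// on (div, erf, exp, floor, ...) end their set_output_* overrides by forwarding to
// the parent class, e.g.
//
//   at::native::structured_div_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
//
// These ops are TensorIterator-backed, and that call is what binds the tensor
// chosen here (fresh allocation, the caller's `out`, or `self`) to the iterator
// configured in meta(), so that impl() can run on it. This reading is inferred
// from the generated code itself rather than from separate documentation.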
4806struct structured_div_out_functional final : public at::native::structured_div_out {
4807 void set_output_strided(
4808 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4809 TensorOptions options, DimnameList names
4810 ) override {
4811 outputs_[output_idx] = create_out(sizes, strides, options);
4812 if (!names.empty()) {
4813 namedinference::propagate_names(*outputs_[output_idx], names);
4814 }
4815 // super must happen after, so that downstream can use maybe_get_output
4816 // to retrieve the output
4817 at::native::structured_div_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
4818 }
4819 void set_output_raw_strided(
4820 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4821 TensorOptions options, DimnameList names
4822 ) override {
4823 outputs_[output_idx] = create_out(sizes, strides, options);
4824 if (!names.empty()) {
4825 namedinference::propagate_names(*outputs_[output_idx], names);
4826 }
4827 // super must happen after, so that downstream can use maybe_get_output
4828 // to retrieve the output
4829 at::native::structured_div_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
4830 }
4831 const Tensor& maybe_get_output(int64_t output_idx) override {
4832 return *outputs_[output_idx];
4833 }
4834 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
4835};
4836at::Tensor wrapper_CPU_div_Tensor(const at::Tensor & self, const at::Tensor & other) {
4837structured_div_out_functional op;
4838op.meta(self, other);
4839op.impl(self, other, *op.outputs_[0]);
4840return std::move(op.outputs_[0]).take();
4841}
4842struct structured_div_out_out final : public at::native::structured_div_out {
4843 structured_div_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
4844 void set_output_strided(
4845 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4846 TensorOptions options, DimnameList names
4847 ) override {
4848 const auto& out = outputs_[output_idx].get();
4849 resize_out(out, sizes, strides, options);
4850 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
4851 if (C10_UNLIKELY(maybe_proxy.has_value())) {
4852 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
4853 }
4854 if (!names.empty()) {
4855 namedinference::propagate_names(outputs_[output_idx], names);
4856 }
4857 // super must happen after, so that downstream can use maybe_get_output
4858 // to retrieve the output
4859 at::native::structured_div_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
4860 }
4861 void set_output_raw_strided(
4862 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4863 TensorOptions options, DimnameList names
4864 ) override {
4865 const auto& out = outputs_[output_idx].get();
4866 resize_out(out, sizes, strides, options);
4867 if (!names.empty()) {
4868 namedinference::propagate_names(outputs_[output_idx], names);
4869 }
4870 // super must happen after, so that downstream can use maybe_get_output
4871 // to retrieve the output
4872 at::native::structured_div_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
4873 }
4874 const Tensor& maybe_get_output(int64_t output_idx) override {
4875 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
4876 }
4877 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
4878 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
4879};
4880at::Tensor & wrapper_CPU_div_out_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
4881structured_div_out_out op(out);
4882op.meta(self, other);
4883op.impl(self, other, op.maybe_get_output(0));
4884if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
4885return out;
4886}
4887struct structured_div_out_inplace final : public at::native::structured_div_out {
4888 structured_div_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
4889 void set_output_strided(
4890 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4891 TensorOptions options, DimnameList names
4892 ) override {
4893 const auto& out = outputs_[output_idx].get();
4894 check_inplace(out, sizes, options);
4895 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
4896 if (C10_UNLIKELY(maybe_proxy.has_value())) {
4897 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
4898 }
4899 if (!names.empty()) {
4900 namedinference::propagate_names(outputs_[output_idx], names);
4901 }
4902 // super must happen after, so that downstream can use maybe_get_output
4903 // to retrieve the output
4904 at::native::structured_div_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
4905 }
4906 void set_output_raw_strided(
4907 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4908 TensorOptions options, DimnameList names
4909 ) override {
4910 const auto& out = outputs_[output_idx].get();
4911 check_inplace(out, sizes, options);
4912 if (!names.empty()) {
4913 namedinference::propagate_names(outputs_[output_idx], names);
4914 }
4915 // super must happen after, so that downstream can use maybe_get_output
4916 // to retrieve the output
4917 at::native::structured_div_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
4918 }
4919 const Tensor& maybe_get_output(int64_t output_idx) override {
4920 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
4921 }
4922 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
4923 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
4924};
4925at::Tensor & wrapper_CPU_div__Tensor(at::Tensor & self, const at::Tensor & other) {
4926structured_div_out_inplace op(self);
4927op.meta(self, other);
4928op.impl(self, other, op.outputs_[0]);
4929if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
4930return self;
4931}
4932struct structured_div_out_mode_functional final : public at::native::structured_div_out_mode {
4933 void set_output_strided(
4934 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4935 TensorOptions options, DimnameList names
4936 ) override {
4937 outputs_[output_idx] = create_out(sizes, strides, options);
4938 if (!names.empty()) {
4939 namedinference::propagate_names(*outputs_[output_idx], names);
4940 }
4941 // super must happen after, so that downstream can use maybe_get_output
4942 // to retrieve the output
4943 at::native::structured_div_out_mode::set_output_raw_strided(output_idx, sizes, strides, options, names);
4944 }
4945 void set_output_raw_strided(
4946 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4947 TensorOptions options, DimnameList names
4948 ) override {
4949 outputs_[output_idx] = create_out(sizes, strides, options);
4950 if (!names.empty()) {
4951 namedinference::propagate_names(*outputs_[output_idx], names);
4952 }
4953 // super must happen after, so that downstream can use maybe_get_output
4954 // to retrieve the output
4955 at::native::structured_div_out_mode::set_output_raw_strided(output_idx, sizes, strides, options, names);
4956 }
4957 const Tensor& maybe_get_output(int64_t output_idx) override {
4958 return *outputs_[output_idx];
4959 }
4960 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
4961};
4962at::Tensor wrapper_CPU_div_Tensor_mode(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
4963structured_div_out_mode_functional op;
4964op.meta(self, other, rounding_mode);
4965op.impl(self, other, rounding_mode, *op.outputs_[0]);
4966return std::move(op.outputs_[0]).take();
4967}
4968struct structured_div_out_mode_out final : public at::native::structured_div_out_mode {
4969 structured_div_out_mode_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
4970 void set_output_strided(
4971 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4972 TensorOptions options, DimnameList names
4973 ) override {
4974 const auto& out = outputs_[output_idx].get();
4975 resize_out(out, sizes, strides, options);
4976 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
4977 if (C10_UNLIKELY(maybe_proxy.has_value())) {
4978 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
4979 }
4980 if (!names.empty()) {
4981 namedinference::propagate_names(outputs_[output_idx], names);
4982 }
4983 // super must happen after, so that downstream can use maybe_get_output
4984 // to retrieve the output
4985 at::native::structured_div_out_mode::set_output_raw_strided(output_idx, sizes, strides, options, names);
4986 }
4987 void set_output_raw_strided(
4988 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
4989 TensorOptions options, DimnameList names
4990 ) override {
4991 const auto& out = outputs_[output_idx].get();
4992 resize_out(out, sizes, strides, options);
4993 if (!names.empty()) {
4994 namedinference::propagate_names(outputs_[output_idx], names);
4995 }
4996 // super must happen after, so that downstream can use maybe_get_output
4997 // to retrieve the output
4998 at::native::structured_div_out_mode::set_output_raw_strided(output_idx, sizes, strides, options, names);
4999 }
5000 const Tensor& maybe_get_output(int64_t output_idx) override {
5001 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
5002 }
5003 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
5004 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
5005};
5006at::Tensor & wrapper_CPU_div_out_out_mode(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) {
5007structured_div_out_mode_out op(out);
5008op.meta(self, other, rounding_mode);
5009op.impl(self, other, rounding_mode, op.maybe_get_output(0));
5010if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
5011return out;
5012}
5013struct structured_div_out_mode_inplace final : public at::native::structured_div_out_mode {
5014 structured_div_out_mode_inplace(Tensor& self) : outputs_{std::ref(self)} {}
5015 void set_output_strided(
5016 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5017 TensorOptions options, DimnameList names
5018 ) override {
5019 const auto& out = outputs_[output_idx].get();
5020 check_inplace(out, sizes, options);
5021 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
5022 if (C10_UNLIKELY(maybe_proxy.has_value())) {
5023 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
5024 }
5025 if (!names.empty()) {
5026 namedinference::propagate_names(outputs_[output_idx], names);
5027 }
5028 // super must happen after, so that downstream can use maybe_get_output
5029 // to retrieve the output
5030 at::native::structured_div_out_mode::set_output_raw_strided(output_idx, sizes, strides, options, names);
5031 }
5032 void set_output_raw_strided(
5033 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5034 TensorOptions options, DimnameList names
5035 ) override {
5036 const auto& out = outputs_[output_idx].get();
5037 check_inplace(out, sizes, options);
5038 if (!names.empty()) {
5039 namedinference::propagate_names(outputs_[output_idx], names);
5040 }
5041 // super must happen after, so that downstream can use maybe_get_output
5042 // to retrieve the output
5043 at::native::structured_div_out_mode::set_output_raw_strided(output_idx, sizes, strides, options, names);
5044 }
5045 const Tensor& maybe_get_output(int64_t output_idx) override {
5046 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
5047 }
5048 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
5049 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
5050};
5051at::Tensor & wrapper_CPU_div__Tensor_mode(at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
5052structured_div_out_mode_inplace op(self);
5053op.meta(self, other, rounding_mode);
5054op.impl(self, other, rounding_mode, op.outputs_[0]);
5055if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
5056return self;
5057}
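// A hedged sketch of how the div overloads above are reached from user code on a
// CPU tensor (other dispatch keys such as Autograd run first and then redispatch
// here; names on the right are the wrappers defined above):
//
//   at::Tensor a = at::randn({3}), b = at::randn({3});
//   at::Tensor c = at::div(a, b);              // -> wrapper_CPU_div_Tensor
//   at::div_out(c, a, b);                      // -> wrapper_CPU_div_out_out
//   a.div_(b);                                 // -> wrapper_CPU_div__Tensor
//   at::div(a, b, /*rounding_mode=*/"floor");  // -> wrapper_CPU_div_Tensor_mode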
5058namespace {
5059at::Tensor wrapper_CPU__dot(const at::Tensor & self, const at::Tensor & tensor) {
5060 // No device check
5061 // DeviceGuard omitted
5062 return at::native::dot(self, tensor);
5063}
5064} // anonymous namespace
5065namespace {
5066at::Tensor wrapper_CPU__vdot(const at::Tensor & self, const at::Tensor & other) {
5067 // No device check
5068 // DeviceGuard omitted
5069 return at::native::vdot(self, other);
5070}
5071} // anonymous namespace
5072namespace {
5073at::Tensor wrapper_CPU__embedding_dense_backward(const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq) {
5074 // No device check
5075 // DeviceGuard omitted
5076 return at::native::embedding_dense_backward_cpu(grad_output, indices, num_weights.expect_int(), padding_idx.expect_int(), scale_grad_by_freq);
5077}
5078} // anonymous namespace
5079namespace {
5080at::Tensor & wrapper_CPU__embedding_renorm_(at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
5081 // No device check
5082 // DeviceGuard omitted
5083 return at::native::embedding_renorm_cpu_(self, indices, max_norm, norm_type);
5084}
5085} // anonymous namespace
5086namespace {
5087::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> wrapper_CPU___embedding_bag_forward_only(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx) {
5088 // No device check
5089 // DeviceGuard omitted
5090 return at::native::_embedding_bag_forward_only_cpu(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
5091}
5092} // anonymous namespace
5093namespace {
5094::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> wrapper_CPU___embedding_bag(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx) {
5095 // No device check
5096 // DeviceGuard omitted
5097 return at::native::_embedding_bag_cpu(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
5098}
5099} // anonymous namespace
5100namespace {
5101at::Tensor wrapper_CPU___embedding_bag_dense_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
5102 // No device check
5103 // DeviceGuard omitted
5104 return at::native::_embedding_bag_dense_backward_cpu(grad, indices, offset2bag, bag_size, maximum_indices, num_weights.expect_int(), scale_grad_by_freq, mode, per_sample_weights, padding_idx);
5105}
5106} // anonymous namespace
5107namespace {
5108at::Tensor wrapper_CPU___embedding_bag_per_sample_weights_backward(const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx) {
5109 // No device check
5110 // DeviceGuard omitted
5111 return at::native::_embedding_bag_per_sample_weights_backward_cpu(grad, weight, indices, offsets, offset2bag, mode, padding_idx);
5112}
5113} // anonymous namespace
5114namespace {
5115at::Tensor wrapper_CPU_memory_format_empty(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
5116 // No device check
5117 // DeviceGuard omitted
5118 return at::native::empty_cpu(C10_AS_INTARRAYREF_SLOW(size), dtype, layout, device, pin_memory, memory_format);
5119}
5120} // anonymous namespace
5121namespace {
5122at::Tensor wrapper_CPU___empty_affine_quantized(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format) {
5123 // No device check
5124 // DeviceGuard omitted
5125 return at::native::empty_affine_quantized_other_backends_stub(size, dtype, layout, device, pin_memory, scale, zero_point, memory_format);
5126}
5127} // anonymous namespace
5128namespace {
5129at::Tensor wrapper_CPU___empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
5130 // No device check
5131 // DeviceGuard omitted
5132 return at::native::empty_per_channel_affine_quantized_other_backends_stub(size, scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format);
5133}
5134} // anonymous namespace
5135namespace {
5136const at::Tensor & wrapper_CPU__resize_(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
5137 // No device check
5138 // DeviceGuard omitted
5139 return at::native::resize_(self, C10_AS_INTARRAYREF_SLOW(size), memory_format);
5140}
5141} // anonymous namespace
5142namespace {
5143at::Tensor wrapper_CPU__empty_strided(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5144 // No device check
5145 // DeviceGuard omitted
5146 return at::native::empty_strided_cpu(C10_AS_INTARRAYREF_SLOW(size), C10_AS_INTARRAYREF_SLOW(stride), dtype, layout, device, pin_memory);
5147}
5148} // anonymous namespace
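// NOTE: the empty/resize_/empty_strided wrappers above accept c10::SymInt sizes
// (their schemas are SymInt-aware) but immediately lower them for the eager CPU
// kernels, via C10_AS_INTARRAYREF_SLOW(...) for whole size/stride arrays or
// .expect_int() for scalars; both conversions check that no genuinely symbolic
// value reaches this backend. A hedged illustration:
//
//   c10::SymInt n = 8;              // a concrete value, no symbolic shape involved
//   int64_t plain = n.expect_int(); // fine here; checks/fails if n were symbolic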
5149struct structured_erf_out_functional final : public at::native::structured_erf_out {
5150 void set_output_strided(
5151 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5152 TensorOptions options, DimnameList names
5153 ) override {
5154 outputs_[output_idx] = create_out(sizes, strides, options);
5155 if (!names.empty()) {
5156 namedinference::propagate_names(*outputs_[output_idx], names);
5157 }
5158 // super must happen after, so that downstream can use maybe_get_output
5159 // to retrieve the output
5160 at::native::structured_erf_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5161 }
5162 void set_output_raw_strided(
5163 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5164 TensorOptions options, DimnameList names
5165 ) override {
5166 outputs_[output_idx] = create_out(sizes, strides, options);
5167 if (!names.empty()) {
5168 namedinference::propagate_names(*outputs_[output_idx], names);
5169 }
5170 // super must happen after, so that downstream can use maybe_get_output
5171 // to retrieve the output
5172 at::native::structured_erf_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5173 }
5174 const Tensor& maybe_get_output(int64_t output_idx) override {
5175 return *outputs_[output_idx];
5176 }
5177 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
5178};
5179at::Tensor wrapper_CPU_erf(const at::Tensor & self) {
5180structured_erf_out_functional op;
5181op.meta(self);
5182op.impl(self, *op.outputs_[0]);
5183return std::move(op.outputs_[0]).take();
5184}
5185struct structured_erf_out_out final : public at::native::structured_erf_out {
5186 structured_erf_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
5187 void set_output_strided(
5188 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5189 TensorOptions options, DimnameList names
5190 ) override {
5191 const auto& out = outputs_[output_idx].get();
5192 resize_out(out, sizes, strides, options);
5193 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
5194 if (C10_UNLIKELY(maybe_proxy.has_value())) {
5195 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
5196 }
5197 if (!names.empty()) {
5198 namedinference::propagate_names(outputs_[output_idx], names);
5199 }
5200 // super must happen after, so that downstream can use maybe_get_output
5201 // to retrieve the output
5202 at::native::structured_erf_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5203 }
5204 void set_output_raw_strided(
5205 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5206 TensorOptions options, DimnameList names
5207 ) override {
5208 const auto& out = outputs_[output_idx].get();
5209 resize_out(out, sizes, strides, options);
5210 if (!names.empty()) {
5211 namedinference::propagate_names(outputs_[output_idx], names);
5212 }
5213 // super must happen after, so that downstream can use maybe_get_output
5214 // to retrieve the output
5215 at::native::structured_erf_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5216 }
5217 const Tensor& maybe_get_output(int64_t output_idx) override {
5218 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
5219 }
5220 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
5221 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
5222};
5223at::Tensor & wrapper_CPU_erf_out_out(const at::Tensor & self, at::Tensor & out) {
5224structured_erf_out_out op(out);
5225op.meta(self);
5226op.impl(self, op.maybe_get_output(0));
5227if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
5228return out;
5229}
5230struct structured_erf_out_inplace final : public at::native::structured_erf_out {
5231 structured_erf_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
5232 void set_output_strided(
5233 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5234 TensorOptions options, DimnameList names
5235 ) override {
5236 const auto& out = outputs_[output_idx].get();
5237 check_inplace(out, sizes, options);
5238 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
5239 if (C10_UNLIKELY(maybe_proxy.has_value())) {
5240 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
5241 }
5242 if (!names.empty()) {
5243 namedinference::propagate_names(outputs_[output_idx], names);
5244 }
5245 // super must happen after, so that downstream can use maybe_get_output
5246 // to retrieve the output
5247 at::native::structured_erf_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5248 }
5249 void set_output_raw_strided(
5250 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5251 TensorOptions options, DimnameList names
5252 ) override {
5253 const auto& out = outputs_[output_idx].get();
5254 check_inplace(out, sizes, options);
5255 if (!names.empty()) {
5256 namedinference::propagate_names(outputs_[output_idx], names);
5257 }
5258 // super must happen after, so that downstream can use maybe_get_output
5259 // to retrieve the output
5260 at::native::structured_erf_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5261 }
5262 const Tensor& maybe_get_output(int64_t output_idx) override {
5263 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
5264 }
5265 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
5266 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
5267};
5268at::Tensor & wrapper_CPU_erf_(at::Tensor & self) {
5269structured_erf_out_inplace op(self);
5270op.meta(self);
5271op.impl(self, op.outputs_[0]);
5272if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
5273return self;
5274}
5275struct structured_erfc_out_functional final : public at::native::structured_erfc_out {
5276 void set_output_strided(
5277 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5278 TensorOptions options, DimnameList names
5279 ) override {
5280 outputs_[output_idx] = create_out(sizes, strides, options);
5281 if (!names.empty()) {
5282 namedinference::propagate_names(*outputs_[output_idx], names);
5283 }
5284 // super must happen after, so that downstream can use maybe_get_output
5285 // to retrieve the output
5286 at::native::structured_erfc_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5287 }
5288 void set_output_raw_strided(
5289 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5290 TensorOptions options, DimnameList names
5291 ) override {
5292 outputs_[output_idx] = create_out(sizes, strides, options);
5293 if (!names.empty()) {
5294 namedinference::propagate_names(*outputs_[output_idx], names);
5295 }
5296 // super must happen after, so that downstream can use maybe_get_output
5297 // to retrieve the output
5298 at::native::structured_erfc_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5299 }
5300 const Tensor& maybe_get_output(int64_t output_idx) override {
5301 return *outputs_[output_idx];
5302 }
5303 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
5304};
5305at::Tensor wrapper_CPU_erfc(const at::Tensor & self) {
5306structured_erfc_out_functional op;
5307op.meta(self);
5308op.impl(self, *op.outputs_[0]);
5309return std::move(op.outputs_[0]).take();
5310}
5311struct structured_erfc_out_out final : public at::native::structured_erfc_out {
5312 structured_erfc_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
5313 void set_output_strided(
5314 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5315 TensorOptions options, DimnameList names
5316 ) override {
5317 const auto& out = outputs_[output_idx].get();
5318 resize_out(out, sizes, strides, options);
5319 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
5320 if (C10_UNLIKELY(maybe_proxy.has_value())) {
5321 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
5322 }
5323 if (!names.empty()) {
5324 namedinference::propagate_names(outputs_[output_idx], names);
5325 }
5326 // super must happen after, so that downstream can use maybe_get_output
5327 // to retrieve the output
5328 at::native::structured_erfc_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5329 }
5330 void set_output_raw_strided(
5331 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5332 TensorOptions options, DimnameList names
5333 ) override {
5334 const auto& out = outputs_[output_idx].get();
5335 resize_out(out, sizes, strides, options);
5336 if (!names.empty()) {
5337 namedinference::propagate_names(outputs_[output_idx], names);
5338 }
5339 // super must happen after, so that downstream can use maybe_get_output
5340 // to retrieve the output
5341 at::native::structured_erfc_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5342 }
5343 const Tensor& maybe_get_output(int64_t output_idx) override {
5344 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
5345 }
5346 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
5347 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
5348};
5349at::Tensor & wrapper_CPU_erfc_out_out(const at::Tensor & self, at::Tensor & out) {
5350structured_erfc_out_out op(out);
5351op.meta(self);
5352op.impl(self, op.maybe_get_output(0));
5353if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
5354return out;
5355}
5356struct structured_erfc_out_inplace final : public at::native::structured_erfc_out {
5357 structured_erfc_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
5358 void set_output_strided(
5359 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5360 TensorOptions options, DimnameList names
5361 ) override {
5362 const auto& out = outputs_[output_idx].get();
5363 check_inplace(out, sizes, options);
5364 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
5365 if (C10_UNLIKELY(maybe_proxy.has_value())) {
5366 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
5367 }
5368 if (!names.empty()) {
5369 namedinference::propagate_names(outputs_[output_idx], names);
5370 }
5371 // super must happen after, so that downstream can use maybe_get_output
5372 // to retrieve the output
5373 at::native::structured_erfc_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5374 }
5375 void set_output_raw_strided(
5376 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5377 TensorOptions options, DimnameList names
5378 ) override {
5379 const auto& out = outputs_[output_idx].get();
5380 check_inplace(out, sizes, options);
5381 if (!names.empty()) {
5382 namedinference::propagate_names(outputs_[output_idx], names);
5383 }
5384 // super must happen after, so that downstream can use maybe_get_output
5385 // to retrieve the output
5386 at::native::structured_erfc_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5387 }
5388 const Tensor& maybe_get_output(int64_t output_idx) override {
5389 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
5390 }
5391 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
5392 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
5393};
5394at::Tensor & wrapper_CPU_erfc_(at::Tensor & self) {
5395structured_erfc_out_inplace op(self);
5396op.meta(self);
5397op.impl(self, op.outputs_[0]);
5398if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
5399return self;
5400}
5401struct structured_exp_out_functional final : public at::native::structured_exp_out {
5402 void set_output_strided(
5403 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5404 TensorOptions options, DimnameList names
5405 ) override {
5406 outputs_[output_idx] = create_out(sizes, strides, options);
5407 if (!names.empty()) {
5408 namedinference::propagate_names(*outputs_[output_idx], names);
5409 }
5410 // super must happen after, so that downstream can use maybe_get_output
5411 // to retrieve the output
5412 at::native::structured_exp_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5413 }
5414 void set_output_raw_strided(
5415 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5416 TensorOptions options, DimnameList names
5417 ) override {
5418 outputs_[output_idx] = create_out(sizes, strides, options);
5419 if (!names.empty()) {
5420 namedinference::propagate_names(*outputs_[output_idx], names);
5421 }
5422 // super must happen after, so that downstream can use maybe_get_output
5423 // to retrieve the output
5424 at::native::structured_exp_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5425 }
5426 const Tensor& maybe_get_output(int64_t output_idx) override {
5427 return *outputs_[output_idx];
5428 }
5429 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
5430};
5431at::Tensor wrapper_CPU_exp(const at::Tensor & self) {
5432structured_exp_out_functional op;
5433op.meta(self);
5434op.impl(self, *op.outputs_[0]);
5435return std::move(op.outputs_[0]).take();
5436}
5437struct structured_exp_out_out final : public at::native::structured_exp_out {
5438 structured_exp_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
5439 void set_output_strided(
5440 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5441 TensorOptions options, DimnameList names
5442 ) override {
5443 const auto& out = outputs_[output_idx].get();
5444 resize_out(out, sizes, strides, options);
5445 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
5446 if (C10_UNLIKELY(maybe_proxy.has_value())) {
5447 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
5448 }
5449 if (!names.empty()) {
5450 namedinference::propagate_names(outputs_[output_idx], names);
5451 }
5452 // super must happen after, so that downstream can use maybe_get_output
5453 // to retrieve the output
5454 at::native::structured_exp_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5455 }
5456 void set_output_raw_strided(
5457 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5458 TensorOptions options, DimnameList names
5459 ) override {
5460 const auto& out = outputs_[output_idx].get();
5461 resize_out(out, sizes, strides, options);
5462 if (!names.empty()) {
5463 namedinference::propagate_names(outputs_[output_idx], names);
5464 }
5465 // super must happen after, so that downstream can use maybe_get_output
5466 // to retrieve the output
5467 at::native::structured_exp_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5468 }
5469 const Tensor& maybe_get_output(int64_t output_idx) override {
5470 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
5471 }
5472 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
5473 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
5474};
5475at::Tensor & wrapper_CPU_exp_out_out(const at::Tensor & self, at::Tensor & out) {
5476structured_exp_out_out op(out);
5477op.meta(self);
5478op.impl(self, op.maybe_get_output(0));
5479if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
5480return out;
5481}
5482struct structured_exp_out_inplace final : public at::native::structured_exp_out {
5483 structured_exp_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
5484 void set_output_strided(
5485 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5486 TensorOptions options, DimnameList names
5487 ) override {
5488 const auto& out = outputs_[output_idx].get();
5489 check_inplace(out, sizes, options);
5490 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
5491 if (C10_UNLIKELY(maybe_proxy.has_value())) {
5492 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
5493 }
5494 if (!names.empty()) {
5495 namedinference::propagate_names(outputs_[output_idx], names);
5496 }
5497 // super must happen after, so that downstream can use maybe_get_output
5498 // to retrieve the output
5499 at::native::structured_exp_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5500 }
5501 void set_output_raw_strided(
5502 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5503 TensorOptions options, DimnameList names
5504 ) override {
5505 const auto& out = outputs_[output_idx].get();
5506 check_inplace(out, sizes, options);
5507 if (!names.empty()) {
5508 namedinference::propagate_names(outputs_[output_idx], names);
5509 }
5510 // super must happen after, so that downstream can use maybe_get_output
5511 // to retrieve the output
5512 at::native::structured_exp_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5513 }
5514 const Tensor& maybe_get_output(int64_t output_idx) override {
5515 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
5516 }
5517 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
5518 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
5519};
5520at::Tensor & wrapper_CPU_exp_(at::Tensor & self) {
5521structured_exp_out_inplace op(self);
5522op.meta(self);
5523op.impl(self, op.outputs_[0]);
5524if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
5525return self;
5526}
5527struct structured_exp2_out_functional final : public at::native::structured_exp2_out {
5528 void set_output_strided(
5529 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5530 TensorOptions options, DimnameList names
5531 ) override {
5532 outputs_[output_idx] = create_out(sizes, strides, options);
5533 if (!names.empty()) {
5534 namedinference::propagate_names(*outputs_[output_idx], names);
5535 }
5536 // super must happen after, so that downstream can use maybe_get_output
5537 // to retrieve the output
5538 at::native::structured_exp2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5539 }
5540 void set_output_raw_strided(
5541 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5542 TensorOptions options, DimnameList names
5543 ) override {
5544 outputs_[output_idx] = create_out(sizes, strides, options);
5545 if (!names.empty()) {
5546 namedinference::propagate_names(*outputs_[output_idx], names);
5547 }
5548 // super must happen after, so that downstream can use maybe_get_output
5549 // to retrieve the output
5550 at::native::structured_exp2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5551 }
5552 const Tensor& maybe_get_output(int64_t output_idx) override {
5553 return *outputs_[output_idx];
5554 }
5555 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
5556};
5557at::Tensor wrapper_CPU_exp2(const at::Tensor & self) {
5558structured_exp2_out_functional op;
5559op.meta(self);
5560op.impl(self, *op.outputs_[0]);
5561return std::move(op.outputs_[0]).take();
5562}
5563struct structured_exp2_out_out final : public at::native::structured_exp2_out {
5564 structured_exp2_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
5565 void set_output_strided(
5566 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5567 TensorOptions options, DimnameList names
5568 ) override {
5569 const auto& out = outputs_[output_idx].get();
5570 resize_out(out, sizes, strides, options);
5571 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
5572 if (C10_UNLIKELY(maybe_proxy.has_value())) {
5573 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
5574 }
5575 if (!names.empty()) {
5576 namedinference::propagate_names(outputs_[output_idx], names);
5577 }
5578 // super must happen after, so that downstream can use maybe_get_output
5579 // to retrieve the output
5580 at::native::structured_exp2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5581 }
5582 void set_output_raw_strided(
5583 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5584 TensorOptions options, DimnameList names
5585 ) override {
5586 const auto& out = outputs_[output_idx].get();
5587 resize_out(out, sizes, strides, options);
5588 if (!names.empty()) {
5589 namedinference::propagate_names(outputs_[output_idx], names);
5590 }
5591 // super must happen after, so that downstream can use maybe_get_output
5592 // to retrieve the output
5593 at::native::structured_exp2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5594 }
5595 const Tensor& maybe_get_output(int64_t output_idx) override {
5596 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
5597 }
5598 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
5599 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
5600};
5601at::Tensor & wrapper_CPU_exp2_out_out(const at::Tensor & self, at::Tensor & out) {
5602structured_exp2_out_out op(out);
5603op.meta(self);
5604op.impl(self, op.maybe_get_output(0));
5605if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
5606return out;
5607}
5608struct structured_exp2_out_inplace final : public at::native::structured_exp2_out {
5609 structured_exp2_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
5610 void set_output_strided(
5611 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5612 TensorOptions options, DimnameList names
5613 ) override {
5614 const auto& out = outputs_[output_idx].get();
5615 check_inplace(out, sizes, options);
5616 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
5617 if (C10_UNLIKELY(maybe_proxy.has_value())) {
5618 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
5619 }
5620 if (!names.empty()) {
5621 namedinference::propagate_names(outputs_[output_idx], names);
5622 }
5623 // super must happen after, so that downstream can use maybe_get_output
5624 // to retrieve the output
5625 at::native::structured_exp2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5626 }
5627 void set_output_raw_strided(
5628 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5629 TensorOptions options, DimnameList names
5630 ) override {
5631 const auto& out = outputs_[output_idx].get();
5632 check_inplace(out, sizes, options);
5633 if (!names.empty()) {
5634 namedinference::propagate_names(outputs_[output_idx], names);
5635 }
5636 // super must happen after, so that downstream can use maybe_get_output
5637 // to retrieve the output
5638 at::native::structured_exp2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5639 }
5640 const Tensor& maybe_get_output(int64_t output_idx) override {
5641 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
5642 }
5643 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
5644 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
5645};
5646at::Tensor & wrapper_CPU_exp2_(at::Tensor & self) {
5647structured_exp2_out_inplace op(self);
5648op.meta(self);
5649op.impl(self, op.outputs_[0]);
5650if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
5651return self;
5652}
5653struct structured_expm1_out_functional final : public at::native::structured_expm1_out {
5654 void set_output_strided(
5655 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5656 TensorOptions options, DimnameList names
5657 ) override {
5658 outputs_[output_idx] = create_out(sizes, strides, options);
5659 if (!names.empty()) {
5660 namedinference::propagate_names(*outputs_[output_idx], names);
5661 }
5662 // super must happen after, so that downstream can use maybe_get_output
5663 // to retrieve the output
5664 at::native::structured_expm1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5665 }
5666 void set_output_raw_strided(
5667 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5668 TensorOptions options, DimnameList names
5669 ) override {
5670 outputs_[output_idx] = create_out(sizes, strides, options);
5671 if (!names.empty()) {
5672 namedinference::propagate_names(*outputs_[output_idx], names);
5673 }
5674 // super must happen after, so that downstream can use maybe_get_output
5675 // to retrieve the output
5676 at::native::structured_expm1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5677 }
5678 const Tensor& maybe_get_output(int64_t output_idx) override {
5679 return *outputs_[output_idx];
5680 }
5681 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
5682};
5683at::Tensor wrapper_CPU_expm1(const at::Tensor & self) {
5684structured_expm1_out_functional op;
5685op.meta(self);
5686op.impl(self, *op.outputs_[0]);
5687return std::move(op.outputs_[0]).take();
5688}
5689struct structured_expm1_out_out final : public at::native::structured_expm1_out {
5690 structured_expm1_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
5691 void set_output_strided(
5692 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5693 TensorOptions options, DimnameList names
5694 ) override {
5695 const auto& out = outputs_[output_idx].get();
5696 resize_out(out, sizes, strides, options);
5697 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
5698 if (C10_UNLIKELY(maybe_proxy.has_value())) {
5699 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
5700 }
5701 if (!names.empty()) {
5702 namedinference::propagate_names(outputs_[output_idx], names);
5703 }
5704 // super must happen after, so that downstream can use maybe_get_output
5705 // to retrieve the output
5706 at::native::structured_expm1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5707 }
5708 void set_output_raw_strided(
5709 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5710 TensorOptions options, DimnameList names
5711 ) override {
5712 const auto& out = outputs_[output_idx].get();
5713 resize_out(out, sizes, strides, options);
5714 if (!names.empty()) {
5715 namedinference::propagate_names(outputs_[output_idx], names);
5716 }
5717 // super must happen after, so that downstream can use maybe_get_output
5718 // to retrieve the output
5719 at::native::structured_expm1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5720 }
5721 const Tensor& maybe_get_output(int64_t output_idx) override {
5722 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
5723 }
5724 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
5725 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
5726};
5727at::Tensor & wrapper_CPU_expm1_out_out(const at::Tensor & self, at::Tensor & out) {
5728structured_expm1_out_out op(out);
5729op.meta(self);
5730op.impl(self, op.maybe_get_output(0));
5731if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
5732return out;
5733}
5734struct structured_expm1_out_inplace final : public at::native::structured_expm1_out {
5735 structured_expm1_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
5736 void set_output_strided(
5737 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5738 TensorOptions options, DimnameList names
5739 ) override {
5740 const auto& out = outputs_[output_idx].get();
5741 check_inplace(out, sizes, options);
5742 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
5743 if (C10_UNLIKELY(maybe_proxy.has_value())) {
5744 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
5745 }
5746 if (!names.empty()) {
5747 namedinference::propagate_names(outputs_[output_idx], names);
5748 }
5749 // super must happen after, so that downstream can use maybe_get_output
5750 // to retrieve the output
5751 at::native::structured_expm1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5752 }
5753 void set_output_raw_strided(
5754 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5755 TensorOptions options, DimnameList names
5756 ) override {
5757 const auto& out = outputs_[output_idx].get();
5758 check_inplace(out, sizes, options);
5759 if (!names.empty()) {
5760 namedinference::propagate_names(outputs_[output_idx], names);
5761 }
5762 // super must happen after, so that downstream can use maybe_get_output
5763 // to retrieve the output
5764 at::native::structured_expm1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5765 }
5766 const Tensor& maybe_get_output(int64_t output_idx) override {
5767 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
5768 }
5769 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
5770 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
5771};
5772at::Tensor & wrapper_CPU_expm1_(at::Tensor & self) {
5773structured_expm1_out_inplace op(self);
5774op.meta(self);
5775op.impl(self, op.outputs_[0]);
5776if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
5777return self;
5778}
5779namespace {
5780at::Tensor & wrapper_CPU_out_eye_out(int64_t n, at::Tensor & out) {
5781 // No device check
5782 // DeviceGuard omitted
5783 return at::native::eye_out_cpu(n, out);
5784}
5785} // anonymous namespace
5786namespace {
5787at::Tensor & wrapper_CPU_m_out_eye_out(int64_t n, int64_t m, at::Tensor & out) {
5788 // No device check
5789 // DeviceGuard omitted
5790 return at::native::eye_out_cpu(n, m, out);
5791}
5792} // anonymous namespace
5793namespace {
5794at::Tensor & wrapper_CPU_Scalar_fill_(at::Tensor & self, const at::Scalar & value) {
5795 // No device check
5796 // DeviceGuard omitted
5797 return at::native::fill_(self, value);
5798}
5799} // anonymous namespace
5800namespace {
5801at::Tensor & wrapper_CPU_Tensor_fill_(at::Tensor & self, const at::Tensor & value) {
5802 // No device check
5803 // DeviceGuard omitted
5804 return at::native::fill_(self, value);
5805}
5806} // anonymous namespace
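// NOTE on naming: the overload name from the operator schema is folded into the
// wrapper name, e.g. wrapper_CPU_out_eye_out / wrapper_CPU_m_out_eye_out above
// correspond to the eye.out and eye.m_out overloads, and the two fill_ wrappers
// correspond to fill_.Scalar and fill_.Tensor. A hedged usage sketch:
//
//   at::Tensor out = at::empty({0});
//   at::eye_out(out, 3);     // eye.out   -> wrapper_CPU_out_eye_out
//   at::eye_out(out, 3, 4);  // eye.m_out -> wrapper_CPU_m_out_eye_out
//   out.fill_(1.0);          // fill_.Scalar -> wrapper_CPU_Scalar_fill_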
5807struct structured_floor_out_functional final : public at::native::structured_floor_out {
5808 void set_output_strided(
5809 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5810 TensorOptions options, DimnameList names
5811 ) override {
5812 outputs_[output_idx] = create_out(sizes, strides, options);
5813 if (!names.empty()) {
5814 namedinference::propagate_names(*outputs_[output_idx], names);
5815 }
5816 // super must happen after, so that downstream can use maybe_get_output
5817 // to retrieve the output
5818 at::native::structured_floor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5819 }
5820 void set_output_raw_strided(
5821 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5822 TensorOptions options, DimnameList names
5823 ) override {
5824 outputs_[output_idx] = create_out(sizes, strides, options);
5825 if (!names.empty()) {
5826 namedinference::propagate_names(*outputs_[output_idx], names);
5827 }
5828 // super must happen after, so that downstream can use maybe_get_output
5829 // to retrieve the output
5830 at::native::structured_floor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5831 }
5832 const Tensor& maybe_get_output(int64_t output_idx) override {
5833 return *outputs_[output_idx];
5834 }
5835 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
5836};
5837at::Tensor wrapper_CPU_floor(const at::Tensor & self) {
5838structured_floor_out_functional op;
5839op.meta(self);
5840op.impl(self, *op.outputs_[0]);
5841return std::move(op.outputs_[0]).take();
5842}
5843struct structured_floor_out_out final : public at::native::structured_floor_out {
5844 structured_floor_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
5845 void set_output_strided(
5846 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5847 TensorOptions options, DimnameList names
5848 ) override {
5849 const auto& out = outputs_[output_idx].get();
5850 resize_out(out, sizes, strides, options);
5851 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
5852 if (C10_UNLIKELY(maybe_proxy.has_value())) {
5853 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
5854 }
5855 if (!names.empty()) {
5856 namedinference::propagate_names(outputs_[output_idx], names);
5857 }
5858 // super must happen after, so that downstream can use maybe_get_output
5859 // to retrieve the output
5860 at::native::structured_floor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5861 }
5862 void set_output_raw_strided(
5863 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5864 TensorOptions options, DimnameList names
5865 ) override {
5866 const auto& out = outputs_[output_idx].get();
5867 resize_out(out, sizes, strides, options);
5868 if (!names.empty()) {
5869 namedinference::propagate_names(outputs_[output_idx], names);
5870 }
5871 // super must happen after, so that downstream can use maybe_get_output
5872 // to retrieve the output
5873 at::native::structured_floor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5874 }
5875 const Tensor& maybe_get_output(int64_t output_idx) override {
5876 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
5877 }
5878 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
5879 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
5880};
5881at::Tensor & wrapper_CPU_floor_out_out(const at::Tensor & self, at::Tensor & out) {
5882structured_floor_out_out op(out);
5883op.meta(self);
5884op.impl(self, op.maybe_get_output(0));
5885if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
5886return out;
5887}
5888struct structured_floor_out_inplace final : public at::native::structured_floor_out {
5889 structured_floor_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
5890 void set_output_strided(
5891 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5892 TensorOptions options, DimnameList names
5893 ) override {
5894 const auto& out = outputs_[output_idx].get();
5895 check_inplace(out, sizes, options);
5896 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
5897 if (C10_UNLIKELY(maybe_proxy.has_value())) {
5898 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
5899 }
5900 if (!names.empty()) {
5901 namedinference::propagate_names(outputs_[output_idx], names);
5902 }
5903 // super must happen after, so that downstream can use maybe_get_output
5904 // to retrieve the output
5905 at::native::structured_floor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5906 }
5907 void set_output_raw_strided(
5908 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5909 TensorOptions options, DimnameList names
5910 ) override {
5911 const auto& out = outputs_[output_idx].get();
5912 check_inplace(out, sizes, options);
5913 if (!names.empty()) {
5914 namedinference::propagate_names(outputs_[output_idx], names);
5915 }
5916 // super must happen after, so that downstream can use maybe_get_output
5917 // to retrieve the output
5918 at::native::structured_floor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5919 }
5920 const Tensor& maybe_get_output(int64_t output_idx) override {
5921 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
5922 }
5923 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
5924 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
5925};
5926at::Tensor & wrapper_CPU_floor_(at::Tensor & self) {
5927structured_floor_out_inplace op(self);
5928op.meta(self);
5929op.impl(self, op.outputs_[0]);
5930if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
5931return self;
5932}
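// floor_divide is not a structured kernel, so these wrappers forward straight
// to the at::native implementations. They sit in an anonymous namespace, so
// they have internal linkage and are presumably referenced only by the
// dispatcher registrations later in this file.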
5933namespace {
5934at::Tensor wrapper_CPU__floor_divide(const at::Tensor & self, const at::Tensor & other) {
5935 // No device check
5936 // DeviceGuard omitted
5937 return at::native::floor_divide(self, other);
5938}
5939} // anonymous namespace
5940namespace {
5941at::Tensor & wrapper_CPU_out_floor_divide_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
5942 // No device check
5943 // DeviceGuard omitted
5944 return at::native::floor_divide_out(self, other, out);
5945}
5946} // anonymous namespace
5947namespace {
5948at::Tensor & wrapper_CPU_Tensor_floor_divide_(at::Tensor & self, const at::Tensor & other) {
5949 // No device check
5950 // DeviceGuard omitted
5951 return at::native::floor_divide_(self, other);
5952}
5953} // anonymous namespace
5954struct structured_frac_out_functional final : public at::native::structured_frac_out {
5955 void set_output_strided(
5956 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5957 TensorOptions options, DimnameList names
5958 ) override {
5959 outputs_[output_idx] = create_out(sizes, strides, options);
5960 if (!names.empty()) {
5961 namedinference::propagate_names(*outputs_[output_idx], names);
5962 }
5963 // super must happen after, so that downstream can use maybe_get_output
5964 // to retrieve the output
5965 at::native::structured_frac_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5966 }
5967 void set_output_raw_strided(
5968 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5969 TensorOptions options, DimnameList names
5970 ) override {
5971 outputs_[output_idx] = create_out(sizes, strides, options);
5972 if (!names.empty()) {
5973 namedinference::propagate_names(*outputs_[output_idx], names);
5974 }
5975 // super must happen after, so that downstream can use maybe_get_output
5976 // to retrieve the output
5977 at::native::structured_frac_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
5978 }
5979 const Tensor& maybe_get_output(int64_t output_idx) override {
5980 return *outputs_[output_idx];
5981 }
5982 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
5983};
5984at::Tensor wrapper_CPU_frac(const at::Tensor & self) {
5985structured_frac_out_functional op;
5986op.meta(self);
5987op.impl(self, *op.outputs_[0]);
5988return std::move(op.outputs_[0]).take();
5989}
5990struct structured_frac_out_out final : public at::native::structured_frac_out {
5991 structured_frac_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
5992 void set_output_strided(
5993 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
5994 TensorOptions options, DimnameList names
5995 ) override {
5996 const auto& out = outputs_[output_idx].get();
5997 resize_out(out, sizes, strides, options);
5998 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
5999 if (C10_UNLIKELY(maybe_proxy.has_value())) {
6000 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
6001 }
6002 if (!names.empty()) {
6003 namedinference::propagate_names(outputs_[output_idx], names);
6004 }
6005 // super must happen after, so that downstream can use maybe_get_output
6006 // to retrieve the output
6007 at::native::structured_frac_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
6008 }
6009 void set_output_raw_strided(
6010 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6011 TensorOptions options, DimnameList names
6012 ) override {
6013 const auto& out = outputs_[output_idx].get();
6014 resize_out(out, sizes, strides, options);
6015 if (!names.empty()) {
6016 namedinference::propagate_names(outputs_[output_idx], names);
6017 }
6018 // super must happen after, so that downstream can use maybe_get_output
6019 // to retrieve the output
6020 at::native::structured_frac_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
6021 }
6022 const Tensor& maybe_get_output(int64_t output_idx) override {
6023 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
6024 }
6025 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
6026 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
6027};
6028at::Tensor & wrapper_CPU_frac_out_out(const at::Tensor & self, at::Tensor & out) {
6029structured_frac_out_out op(out);
6030op.meta(self);
6031op.impl(self, op.maybe_get_output(0));
6032if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
6033return out;
6034}
6035struct structured_frac_out_inplace final : public at::native::structured_frac_out {
6036 structured_frac_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
6037 void set_output_strided(
6038 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6039 TensorOptions options, DimnameList names
6040 ) override {
6041 const auto& out = outputs_[output_idx].get();
6042 check_inplace(out, sizes, options);
6043 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
6044 if (C10_UNLIKELY(maybe_proxy.has_value())) {
6045 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
6046 }
6047 if (!names.empty()) {
6048 namedinference::propagate_names(outputs_[output_idx], names);
6049 }
6050 // super must happen after, so that downstream can use maybe_get_output
6051 // to retrieve the output
6052 at::native::structured_frac_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
6053 }
6054 void set_output_raw_strided(
6055 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6056 TensorOptions options, DimnameList names
6057 ) override {
6058 const auto& out = outputs_[output_idx].get();
6059 check_inplace(out, sizes, options);
6060 if (!names.empty()) {
6061 namedinference::propagate_names(outputs_[output_idx], names);
6062 }
6063 // super must happen after, so that downstream can use maybe_get_output
6064 // to retrieve the output
6065 at::native::structured_frac_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
6066 }
6067 const Tensor& maybe_get_output(int64_t output_idx) override {
6068 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
6069 }
6070 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
6071 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
6072};
6073at::Tensor & wrapper_CPU_frac_(at::Tensor & self) {
6074structured_frac_out_inplace op(self);
6075op.meta(self);
6076op.impl(self, op.outputs_[0]);
6077if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
6078return self;
6079}
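// from_file is a factory op: the optional dtype/layout/device/pin_memory
// arguments are the unpacked fields of a TensorOptions and are forwarded
// unchanged to at::native::from_file.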
6080namespace {
6081at::Tensor wrapper_CPU__from_file(c10::string_view filename, c10::optional<bool> shared, c10::optional<int64_t> size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
6082 // No device check
6083 // DeviceGuard omitted
6084 return at::native::from_file(filename, shared, size, dtype, layout, device, pin_memory);
6085}
6086} // anonymous namespace
6087struct structured_gcd_out_functional final : public at::native::structured_gcd_out {
6088 void set_output_strided(
6089 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6090 TensorOptions options, DimnameList names
6091 ) override {
6092 outputs_[output_idx] = create_out(sizes, strides, options);
6093 if (!names.empty()) {
6094 namedinference::propagate_names(*outputs_[output_idx], names);
6095 }
6096 // super must happen after, so that downstream can use maybe_get_output
6097 // to retrieve the output
6098 at::native::structured_gcd_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
6099 }
6100 void set_output_raw_strided(
6101 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6102 TensorOptions options, DimnameList names
6103 ) override {
6104 outputs_[output_idx] = create_out(sizes, strides, options);
6105 if (!names.empty()) {
6106 namedinference::propagate_names(*outputs_[output_idx], names);
6107 }
6108 // super must happen after, so that downstream can use maybe_get_output
6109 // to retrieve the output
6110 at::native::structured_gcd_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
6111 }
6112 const Tensor& maybe_get_output(int64_t output_idx) override {
6113 return *outputs_[output_idx];
6114 }
6115 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
6116};
6117at::Tensor wrapper_CPU_gcd(const at::Tensor & self, const at::Tensor & other) {
6118structured_gcd_out_functional op;
6119op.meta(self, other);
6120op.impl(self, other, *op.outputs_[0]);
6121return std::move(op.outputs_[0]).take();
6122}
6123struct structured_gcd_out_out final : public at::native::structured_gcd_out {
6124 structured_gcd_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
6125 void set_output_strided(
6126 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6127 TensorOptions options, DimnameList names
6128 ) override {
6129 const auto& out = outputs_[output_idx].get();
6130 resize_out(out, sizes, strides, options);
6131 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
6132 if (C10_UNLIKELY(maybe_proxy.has_value())) {
6133 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
6134 }
6135 if (!names.empty()) {
6136 namedinference::propagate_names(outputs_[output_idx], names);
6137 }
6138 // super must happen after, so that downstream can use maybe_get_output
6139 // to retrieve the output
6140 at::native::structured_gcd_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
6141 }
6142 void set_output_raw_strided(
6143 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6144 TensorOptions options, DimnameList names
6145 ) override {
6146 const auto& out = outputs_[output_idx].get();
6147 resize_out(out, sizes, strides, options);
6148 if (!names.empty()) {
6149 namedinference::propagate_names(outputs_[output_idx], names);
6150 }
6151 // super must happen after, so that downstream can use maybe_get_output
6152 // to retrieve the output
6153 at::native::structured_gcd_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
6154 }
6155 const Tensor& maybe_get_output(int64_t output_idx) override {
6156 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
6157 }
6158 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
6159 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
6160};
6161at::Tensor & wrapper_CPU_gcd_out_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
6162structured_gcd_out_out op(out);
6163op.meta(self, other);
6164op.impl(self, other, op.maybe_get_output(0));
6165if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
6166return out;
6167}
6168struct structured_gcd_out_inplace final : public at::native::structured_gcd_out {
6169 structured_gcd_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
6170 void set_output_strided(
6171 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6172 TensorOptions options, DimnameList names
6173 ) override {
6174 const auto& out = outputs_[output_idx].get();
6175 check_inplace(out, sizes, options);
6176 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
6177 if (C10_UNLIKELY(maybe_proxy.has_value())) {
6178 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
6179 }
6180 if (!names.empty()) {
6181 namedinference::propagate_names(outputs_[output_idx], names);
6182 }
6183 // super must happen after, so that downstream can use maybe_get_output
6184 // to retrieve the output
6185 at::native::structured_gcd_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
6186 }
6187 void set_output_raw_strided(
6188 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6189 TensorOptions options, DimnameList names
6190 ) override {
6191 const auto& out = outputs_[output_idx].get();
6192 check_inplace(out, sizes, options);
6193 if (!names.empty()) {
6194 namedinference::propagate_names(outputs_[output_idx], names);
6195 }
6196 // super must happen after, so that downstream can use maybe_get_output
6197 // to retrieve the output
6198 at::native::structured_gcd_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
6199 }
6200 const Tensor& maybe_get_output(int64_t output_idx) override {
6201 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
6202 }
6203 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
6204 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
6205};
6206at::Tensor & wrapper_CPU_gcd_(at::Tensor & self, const at::Tensor & other) {
6207structured_gcd_out_inplace op(self);
6208op.meta(self, other);
6209op.impl(self, other, op.outputs_[0]);
6210if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
6211return self;
6212}
6213struct structured_lcm_out_functional final : public at::native::structured_lcm_out {
6214 void set_output_strided(
6215 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6216 TensorOptions options, DimnameList names
6217 ) override {
6218 outputs_[output_idx] = create_out(sizes, strides, options);
6219 if (!names.empty()) {
6220 namedinference::propagate_names(*outputs_[output_idx], names);
6221 }
6222 // super must happen after, so that downstream can use maybe_get_output
6223 // to retrieve the output
6224 at::native::structured_lcm_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
6225 }
6226 void set_output_raw_strided(
6227 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6228 TensorOptions options, DimnameList names
6229 ) override {
6230 outputs_[output_idx] = create_out(sizes, strides, options);
6231 if (!names.empty()) {
6232 namedinference::propagate_names(*outputs_[output_idx], names);
6233 }
6234 // super must happen after, so that downstream can use maybe_get_output
6235 // to retrieve the output
6236 at::native::structured_lcm_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
6237 }
6238 const Tensor& maybe_get_output(int64_t output_idx) override {
6239 return *outputs_[output_idx];
6240 }
6241 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
6242};
6243at::Tensor wrapper_CPU_lcm(const at::Tensor & self, const at::Tensor & other) {
6244structured_lcm_out_functional op;
6245op.meta(self, other);
6246op.impl(self, other, *op.outputs_[0]);
6247return std::move(op.outputs_[0]).take();
6248}
6249struct structured_lcm_out_out final : public at::native::structured_lcm_out {
6250 structured_lcm_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
6251 void set_output_strided(
6252 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6253 TensorOptions options, DimnameList names
6254 ) override {
6255 const auto& out = outputs_[output_idx].get();
6256 resize_out(out, sizes, strides, options);
6257 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
6258 if (C10_UNLIKELY(maybe_proxy.has_value())) {
6259 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
6260 }
6261 if (!names.empty()) {
6262 namedinference::propagate_names(outputs_[output_idx], names);
6263 }
6264 // super must happen after, so that downstream can use maybe_get_output
6265 // to retrieve the output
6266 at::native::structured_lcm_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
6267 }
6268 void set_output_raw_strided(
6269 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6270 TensorOptions options, DimnameList names
6271 ) override {
6272 const auto& out = outputs_[output_idx].get();
6273 resize_out(out, sizes, strides, options);
6274 if (!names.empty()) {
6275 namedinference::propagate_names(outputs_[output_idx], names);
6276 }
6277 // super must happen after, so that downstream can use maybe_get_output
6278 // to retrieve the output
6279 at::native::structured_lcm_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
6280 }
6281 const Tensor& maybe_get_output(int64_t output_idx) override {
6282 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
6283 }
6284 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
6285 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
6286};
6287at::Tensor & wrapper_CPU_lcm_out_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
6288structured_lcm_out_out op(out);
6289op.meta(self, other);
6290op.impl(self, other, op.maybe_get_output(0));
6291if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
6292return out;
6293}
6294struct structured_lcm_out_inplace final : public at::native::structured_lcm_out {
6295 structured_lcm_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
6296 void set_output_strided(
6297 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6298 TensorOptions options, DimnameList names
6299 ) override {
6300 const auto& out = outputs_[output_idx].get();
6301 check_inplace(out, sizes, options);
6302 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
6303 if (C10_UNLIKELY(maybe_proxy.has_value())) {
6304 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
6305 }
6306 if (!names.empty()) {
6307 namedinference::propagate_names(outputs_[output_idx], names);
6308 }
6309 // super must happen after, so that downstream can use maybe_get_output
6310 // to retrieve the output
6311 at::native::structured_lcm_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
6312 }
6313 void set_output_raw_strided(
6314 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6315 TensorOptions options, DimnameList names
6316 ) override {
6317 const auto& out = outputs_[output_idx].get();
6318 check_inplace(out, sizes, options);
6319 if (!names.empty()) {
6320 namedinference::propagate_names(outputs_[output_idx], names);
6321 }
6322 // super must happen after, so that downstream can use maybe_get_output
6323 // to retrieve the output
6324 at::native::structured_lcm_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
6325 }
6326 const Tensor& maybe_get_output(int64_t output_idx) override {
6327 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
6328 }
6329 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
6330 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
6331};
6332at::Tensor & wrapper_CPU_lcm_(at::Tensor & self, const at::Tensor & other) {
6333structured_lcm_out_inplace op(self);
6334op.meta(self, other);
6335op.impl(self, other, op.outputs_[0]);
6336if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
6337return self;
6338}
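// grid_sampler_2d/3d and their backward passes are unstructured; the wrappers
// call the *_cpu kernels directly, and the backward variants return a
// (grad_input, grad_grid) tuple gated by output_mask.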
6339namespace {
6340at::Tensor wrapper_CPU__grid_sampler_2d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
6341 // No device check
6342 // DeviceGuard omitted
6343 return at::native::grid_sampler_2d_cpu(input, grid, interpolation_mode, padding_mode, align_corners);
6344}
6345} // anonymous namespace
6346namespace {
6347::std::tuple<at::Tensor,at::Tensor> wrapper_CPU__grid_sampler_2d_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
6348 // No device check
6349 // DeviceGuard omitted
6350 return at::native::grid_sampler_2d_backward_cpu(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask);
6351}
6352} // anonymous namespace
6353namespace {
6354at::Tensor wrapper_CPU__grid_sampler_3d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
6355 // No device check
6356 // DeviceGuard omitted
6357 return at::native::grid_sampler_3d_cpu(input, grid, interpolation_mode, padding_mode, align_corners);
6358}
6359} // anonymous namespace
6360namespace {
6361::std::tuple<at::Tensor,at::Tensor> wrapper_CPU__grid_sampler_3d_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
6362 // No device check
6363 // DeviceGuard omitted
6364 return at::native::grid_sampler_3d_backward_cpu(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask);
6365}
6366} // anonymous namespace
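// The native_group_norm wrappers receive SymInt sizes; expect_int() unwraps
// each one to a concrete int64_t (it is an error if the value is still
// symbolic) before calling the CPU kernel.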
6367namespace {
6368::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU__native_group_norm(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps) {
6369 // No device check
6370 // DeviceGuard omitted
6371 return at::native::native_group_norm(input, weight, bias, N.expect_int(), C.expect_int(), HxW.expect_int(), group, eps);
6372}
6373} // anonymous namespace
6374namespace {
6375::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU__native_group_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask) {
6376 // No device check
6377 // DeviceGuard omitted
6378 return at::native::native_group_norm_backward(grad_out, input, mean, rstd, weight, N.expect_int(), C.expect_int(), HxW.expect_int(), group, output_mask);
6379}
6380} // anonymous namespace
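// The _fft_* wrappers dispatch to the MKL-backed CPU implementations
// (at::native::_fft_*_mkl); for the c2c variant, the SymIntArrayRef `dim` is
// converted to a plain IntArrayRef with C10_AS_INTARRAYREF_SLOW first.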
6381namespace {
6382at::Tensor wrapper_CPU___fft_r2c(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided) {
6383 // No device check
6384 // DeviceGuard omitted
6385 return at::native::_fft_r2c_mkl(self, dim, normalization, onesided);
6386}
6387} // anonymous namespace
6388namespace {
6389at::Tensor & wrapper_CPU_out__fft_r2c_out(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided, at::Tensor & out) {
6390 // No device check
6391 // DeviceGuard omitted
6392 return at::native::_fft_r2c_mkl_out(self, dim, normalization, onesided, out);
6393}
6394} // anonymous namespace
6395namespace {
6396at::Tensor wrapper_CPU___fft_c2r(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size) {
6397 // No device check
6398 // DeviceGuard omitted
6399 return at::native::_fft_c2r_mkl(self, dim, normalization, last_dim_size);
6400}
6401} // anonymous namespace
6402namespace {
6403at::Tensor & wrapper_CPU_out__fft_c2r_out(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size, at::Tensor & out) {
6404 // No device check
6405 // DeviceGuard omitted
6406 return at::native::_fft_c2r_mkl_out(self, dim, normalization, last_dim_size, out);
6407}
6408} // anonymous namespace
6409namespace {
6410at::Tensor wrapper_CPU___fft_c2c(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward) {
6411 // No device check
6412 // DeviceGuard omitted
6413 return at::native::_fft_c2c_mkl(self, C10_AS_INTARRAYREF_SLOW(dim), normalization, forward);
6414}
6415} // anonymous namespace
6416namespace {
6417at::Tensor & wrapper_CPU_out__fft_c2c_out(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward, at::Tensor & out) {
6418 // No device check
6419 // DeviceGuard omitted
6420 return at::native::_fft_c2c_mkl_out(self, C10_AS_INTARRAYREF_SLOW(dim), normalization, forward, out);
6421}
6422} // anonymous namespace
6423namespace {
6424void wrapper_CPU___validate_compressed_sparse_indices(bool is_crow, const at::Tensor & compressed_idx, const at::Tensor & plain_idx, int64_t cdim, int64_t dim, int64_t nnz) {
6425 // No device check
6426 // DeviceGuard omitted
6427 return at::native::_validate_compressed_sparse_indices_cpu(is_crow, compressed_idx, plain_idx, cdim, dim, nnz);
6428}
6429} // anonymous namespace
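// `index` is a structured op whose meta() returns a precompute struct; the
// wrappers below thread precompute.sizes and precompute.strides from meta()
// through to impl().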
6430struct structured_index_out_functional final : public at::native::structured_index_out {
6431 void set_output_strided(
6432 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6433 TensorOptions options, DimnameList names
6434 ) override {
6435 outputs_[output_idx] = create_out(sizes, strides, options);
6436 if (!names.empty()) {
6437 namedinference::propagate_names(*outputs_[output_idx], names);
6438 }
6439 // super must happen after, so that downstream can use maybe_get_output
6440 // to retrieve the output
6441 at::native::structured_index_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
6442 }
6443 void set_output_raw_strided(
6444 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6445 TensorOptions options, DimnameList names
6446 ) override {
6447 outputs_[output_idx] = create_out(sizes, strides, options);
6448 if (!names.empty()) {
6449 namedinference::propagate_names(*outputs_[output_idx], names);
6450 }
6451 // super must happen after, so that downstream can use maybe_get_output
6452 // to retrieve the output
6453 at::native::structured_index_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
6454 }
6455 const Tensor& maybe_get_output(int64_t output_idx) override {
6456 return *outputs_[output_idx];
6457 }
6458 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
6459};
6460at::Tensor wrapper_CPU_index_Tensor(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices) {
6461structured_index_out_functional op;
6462auto precompute = op.meta(self, at::IOptTensorListRef(indices));
6463(void)precompute;
6464op.impl(self, precompute.sizes, precompute.strides, *op.outputs_[0]);
6465return std::move(op.outputs_[0]).take();
6466}
6467struct structured_index_out_out final : public at::native::structured_index_out {
6468 structured_index_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
6469 void set_output_strided(
6470 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6471 TensorOptions options, DimnameList names
6472 ) override {
6473 const auto& out = outputs_[output_idx].get();
6474 resize_out(out, sizes, strides, options);
6475 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
6476 if (C10_UNLIKELY(maybe_proxy.has_value())) {
6477 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
6478 }
6479 if (!names.empty()) {
6480 namedinference::propagate_names(outputs_[output_idx], names);
6481 }
6482 // super must happen after, so that downstream can use maybe_get_output
6483 // to retrieve the output
6484 at::native::structured_index_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
6485 }
6486 void set_output_raw_strided(
6487 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6488 TensorOptions options, DimnameList names
6489 ) override {
6490 const auto& out = outputs_[output_idx].get();
6491 resize_out(out, sizes, strides, options);
6492 if (!names.empty()) {
6493 namedinference::propagate_names(outputs_[output_idx], names);
6494 }
6495 // super must happen after, so that downstream can use maybe_get_output
6496 // to retrieve the output
6497 at::native::structured_index_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
6498 }
6499 const Tensor& maybe_get_output(int64_t output_idx) override {
6500 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
6501 }
6502 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
6503 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
6504};
6505at::Tensor & wrapper_CPU_index_out_Tensor_out(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, at::Tensor & out) {
6506structured_index_out_out op(out);
6507auto precompute = op.meta(self, at::IOptTensorListRef(indices));
6508(void)precompute;
6509op.impl(self, precompute.sizes, precompute.strides, op.maybe_get_output(0));
6510if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
6511return out;
6512}
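// index_copy uses the same precompute pattern: impl() receives precompute.dim
// (the dimension after meta() has processed it) rather than the raw `dim`
// argument. Note that these set_output_* overrides end without a base-class
// call after the "super must happen after" comment, presumably because there
// is no TensorIterator bookkeeping to forward to for this op.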
6513struct structured_index_copy_out_functional final : public at::native::structured_index_copy_out {
6514 void set_output_strided(
6515 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6516 TensorOptions options, DimnameList names
6517 ) override {
6518 outputs_[output_idx] = create_out(sizes, strides, options);
6519 if (!names.empty()) {
6520 namedinference::propagate_names(*outputs_[output_idx], names);
6521 }
6522 // super must happen after, so that downstream can use maybe_get_output
6523 // to retrieve the output
6524 }
6525 void set_output_raw_strided(
6526 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6527 TensorOptions options, DimnameList names
6528 ) override {
6529 outputs_[output_idx] = create_out(sizes, strides, options);
6530 if (!names.empty()) {
6531 namedinference::propagate_names(*outputs_[output_idx], names);
6532 }
6533 // super must happen after, so that downstream can use maybe_get_output
6534 // to retrieve the output
6535 }
6536 const Tensor& maybe_get_output(int64_t output_idx) override {
6537 return *outputs_[output_idx];
6538 }
6539 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
6540};
6541at::Tensor wrapper_CPU_index_copy(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
6542structured_index_copy_out_functional op;
6543auto precompute = op.meta(self, dim, index, source);
6544(void)precompute;
6545op.impl(self, precompute.dim, index, source, *op.outputs_[0]);
6546return std::move(op.outputs_[0]).take();
6547}
6548struct structured_index_copy_out_out final : public at::native::structured_index_copy_out {
6549 structured_index_copy_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
6550 void set_output_strided(
6551 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6552 TensorOptions options, DimnameList names
6553 ) override {
6554 const auto& out = outputs_[output_idx].get();
6555 resize_out(out, sizes, strides, options);
6556 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
6557 if (C10_UNLIKELY(maybe_proxy.has_value())) {
6558 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
6559 }
6560 if (!names.empty()) {
6561 namedinference::propagate_names(outputs_[output_idx], names);
6562 }
6563 // super must happen after, so that downstream can use maybe_get_output
6564 // to retrieve the output
6565 }
6566 void set_output_raw_strided(
6567 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6568 TensorOptions options, DimnameList names
6569 ) override {
6570 const auto& out = outputs_[output_idx].get();
6571 resize_out(out, sizes, strides, options);
6572 if (!names.empty()) {
6573 namedinference::propagate_names(outputs_[output_idx], names);
6574 }
6575 // super must happen after, so that downstream can use maybe_get_output
6576 // to retrieve the output
6577 }
6578 const Tensor& maybe_get_output(int64_t output_idx) override {
6579 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
6580 }
6581 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
6582 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
6583};
6584at::Tensor & wrapper_CPU_index_copy_out_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, at::Tensor & out) {
6585structured_index_copy_out_out op(out);
6586auto precompute = op.meta(self, dim, index, source);
6587(void)precompute;
6588op.impl(self, precompute.dim, index, source, op.maybe_get_output(0));
6589if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
6590return out;
6591}
6592struct structured_index_copy_out_inplace final : public at::native::structured_index_copy_out {
6593 structured_index_copy_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
6594 void set_output_strided(
6595 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6596 TensorOptions options, DimnameList names
6597 ) override {
6598 const auto& out = outputs_[output_idx].get();
6599 check_inplace(out, sizes, options);
6600 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
6601 if (C10_UNLIKELY(maybe_proxy.has_value())) {
6602 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
6603 }
6604 if (!names.empty()) {
6605 namedinference::propagate_names(outputs_[output_idx], names);
6606 }
6607 // super must happen after, so that downstream can use maybe_get_output
6608 // to retrieve the output
6609 }
6610 void set_output_raw_strided(
6611 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6612 TensorOptions options, DimnameList names
6613 ) override {
6614 const auto& out = outputs_[output_idx].get();
6615 check_inplace(out, sizes, options);
6616 if (!names.empty()) {
6617 namedinference::propagate_names(outputs_[output_idx], names);
6618 }
6619 // super must happen after, so that downstream can use maybe_get_output
6620 // to retrieve the output
6621 }
6622 const Tensor& maybe_get_output(int64_t output_idx) override {
6623 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
6624 }
6625 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
6626 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
6627};
6628at::Tensor & wrapper_CPU_index_copy_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
6629structured_index_copy_out_inplace op(self);
6630auto precompute = op.meta(self, dim, index, source);
6631(void)precompute;
6632op.impl(self, precompute.dim, index, source, op.outputs_[0]);
6633if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
6634return self;
6635}
6636namespace {
6637at::Tensor & wrapper_CPU___index_put_impl_(at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) {
6638 // No device check
6639 // DeviceGuard omitted
6640 return at::native::_index_put_impl_(self, indices, values, accumulate, unsafe);
6641}
6642} // anonymous namespace
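// isin has three schema overloads -- Tensor/Tensor, Tensor/Scalar and
// Scalar/Tensor -- and each one gets its own functional and out= wrapper
// below (there is no in-place overload).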
6643struct structured_isin_Tensor_Tensor_out_functional final : public at::native::structured_isin_Tensor_Tensor_out {
6644 void set_output_strided(
6645 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6646 TensorOptions options, DimnameList names
6647 ) override {
6648 outputs_[output_idx] = create_out(sizes, strides, options);
6649 if (!names.empty()) {
6650 namedinference::propagate_names(*outputs_[output_idx], names);
6651 }
6652 // super must happen after, so that downstream can use maybe_get_output
6653 // to retrieve the output
6654 }
6655 void set_output_raw_strided(
6656 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6657 TensorOptions options, DimnameList names
6658 ) override {
6659 outputs_[output_idx] = create_out(sizes, strides, options);
6660 if (!names.empty()) {
6661 namedinference::propagate_names(*outputs_[output_idx], names);
6662 }
6663 // super must happen after, so that downstream can use maybe_get_output
6664 // to retrieve the output
6665 }
6666 const Tensor& maybe_get_output(int64_t output_idx) override {
6667 return *outputs_[output_idx];
6668 }
6669 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
6670};
6671at::Tensor wrapper_CPU_isin_Tensor_Tensor(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert) {
6672structured_isin_Tensor_Tensor_out_functional op;
6673op.meta(elements, test_elements, assume_unique, invert);
6674op.impl(elements, test_elements, assume_unique, invert, *op.outputs_[0]);
6675return std::move(op.outputs_[0]).take();
6676}
6677struct structured_isin_Tensor_Tensor_out_out final : public at::native::structured_isin_Tensor_Tensor_out {
6678 structured_isin_Tensor_Tensor_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
6679 void set_output_strided(
6680 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6681 TensorOptions options, DimnameList names
6682 ) override {
6683 const auto& out = outputs_[output_idx].get();
6684 resize_out(out, sizes, strides, options);
6685 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
6686 if (C10_UNLIKELY(maybe_proxy.has_value())) {
6687 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
6688 }
6689 if (!names.empty()) {
6690 namedinference::propagate_names(outputs_[output_idx], names);
6691 }
6692 // super must happen after, so that downstream can use maybe_get_output
6693 // to retrieve the output
6694 }
6695 void set_output_raw_strided(
6696 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6697 TensorOptions options, DimnameList names
6698 ) override {
6699 const auto& out = outputs_[output_idx].get();
6700 resize_out(out, sizes, strides, options);
6701 if (!names.empty()) {
6702 namedinference::propagate_names(outputs_[output_idx], names);
6703 }
6704 // super must happen after, so that downstream can use maybe_get_output
6705 // to retrieve the output
6706 }
6707 const Tensor& maybe_get_output(int64_t output_idx) override {
6708 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
6709 }
6710 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
6711 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
6712};
6713at::Tensor & wrapper_CPU_isin_out_Tensor_Tensor_out(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) {
6714structured_isin_Tensor_Tensor_out_out op(out);
6715op.meta(elements, test_elements, assume_unique, invert);
6716op.impl(elements, test_elements, assume_unique, invert, op.maybe_get_output(0));
6717if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
6718return out;
6719}
6720struct structured_isin_Tensor_Scalar_out_functional final : public at::native::structured_isin_Tensor_Scalar_out {
6721 void set_output_strided(
6722 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6723 TensorOptions options, DimnameList names
6724 ) override {
6725 outputs_[output_idx] = create_out(sizes, strides, options);
6726 if (!names.empty()) {
6727 namedinference::propagate_names(*outputs_[output_idx], names);
6728 }
6729 // super must happen after, so that downstream can use maybe_get_output
6730 // to retrieve the output
6731 }
6732 void set_output_raw_strided(
6733 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6734 TensorOptions options, DimnameList names
6735 ) override {
6736 outputs_[output_idx] = create_out(sizes, strides, options);
6737 if (!names.empty()) {
6738 namedinference::propagate_names(*outputs_[output_idx], names);
6739 }
6740 // super must happen after, so that downstream can use maybe_get_output
6741 // to retrieve the output
6742 }
6743 const Tensor& maybe_get_output(int64_t output_idx) override {
6744 return *outputs_[output_idx];
6745 }
6746 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
6747};
6748at::Tensor wrapper_CPU_isin_Tensor_Scalar(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert) {
6749structured_isin_Tensor_Scalar_out_functional op;
6750op.meta(elements, test_element, assume_unique, invert);
6751op.impl(elements, test_element, assume_unique, invert, *op.outputs_[0]);
6752return std::move(op.outputs_[0]).take();
6753}
6754struct structured_isin_Tensor_Scalar_out_out final : public at::native::structured_isin_Tensor_Scalar_out {
6755 structured_isin_Tensor_Scalar_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
6756 void set_output_strided(
6757 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6758 TensorOptions options, DimnameList names
6759 ) override {
6760 const auto& out = outputs_[output_idx].get();
6761 resize_out(out, sizes, strides, options);
6762 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
6763 if (C10_UNLIKELY(maybe_proxy.has_value())) {
6764 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
6765 }
6766 if (!names.empty()) {
6767 namedinference::propagate_names(outputs_[output_idx], names);
6768 }
6769 // super must happen after, so that downstream can use maybe_get_output
6770 // to retrieve the output
6771 }
6772 void set_output_raw_strided(
6773 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6774 TensorOptions options, DimnameList names
6775 ) override {
6776 const auto& out = outputs_[output_idx].get();
6777 resize_out(out, sizes, strides, options);
6778 if (!names.empty()) {
6779 namedinference::propagate_names(outputs_[output_idx], names);
6780 }
6781 // super must happen after, so that downstream can use maybe_get_output
6782 // to retrieve the output
6783 }
6784 const Tensor& maybe_get_output(int64_t output_idx) override {
6785 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
6786 }
6787 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
6788 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
6789};
6790at::Tensor & wrapper_CPU_isin_out_Tensor_Scalar_out(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, at::Tensor & out) {
6791structured_isin_Tensor_Scalar_out_out op(out);
6792op.meta(elements, test_element, assume_unique, invert);
6793op.impl(elements, test_element, assume_unique, invert, op.maybe_get_output(0));
6794if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
6795return out;
6796}
6797struct structured_isin_Scalar_Tensor_out_functional final : public at::native::structured_isin_Scalar_Tensor_out {
6798 void set_output_strided(
6799 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6800 TensorOptions options, DimnameList names
6801 ) override {
6802 outputs_[output_idx] = create_out(sizes, strides, options);
6803 if (!names.empty()) {
6804 namedinference::propagate_names(*outputs_[output_idx], names);
6805 }
6806 // super must happen after, so that downstream can use maybe_get_output
6807 // to retrieve the output
6808 }
6809 void set_output_raw_strided(
6810 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6811 TensorOptions options, DimnameList names
6812 ) override {
6813 outputs_[output_idx] = create_out(sizes, strides, options);
6814 if (!names.empty()) {
6815 namedinference::propagate_names(*outputs_[output_idx], names);
6816 }
6817 // super must happen after, so that downstream can use maybe_get_output
6818 // to retrieve the output
6819 }
6820 const Tensor& maybe_get_output(int64_t output_idx) override {
6821 return *outputs_[output_idx];
6822 }
6823 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
6824};
6825at::Tensor wrapper_CPU_isin_Scalar_Tensor(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert) {
6826structured_isin_Scalar_Tensor_out_functional op;
6827op.meta(element, test_elements, assume_unique, invert);
6828op.impl(element, test_elements, assume_unique, invert, *op.outputs_[0]);
6829return std::move(op.outputs_[0]).take();
6830}
6831struct structured_isin_Scalar_Tensor_out_out final : public at::native::structured_isin_Scalar_Tensor_out {
6832 structured_isin_Scalar_Tensor_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
6833 void set_output_strided(
6834 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6835 TensorOptions options, DimnameList names
6836 ) override {
6837 const auto& out = outputs_[output_idx].get();
6838 resize_out(out, sizes, strides, options);
6839 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
6840 if (C10_UNLIKELY(maybe_proxy.has_value())) {
6841 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
6842 }
6843 if (!names.empty()) {
6844 namedinference::propagate_names(outputs_[output_idx], names);
6845 }
6846 // super must happen after, so that downstream can use maybe_get_output
6847 // to retrieve the output
6848 }
6849 void set_output_raw_strided(
6850 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6851 TensorOptions options, DimnameList names
6852 ) override {
6853 const auto& out = outputs_[output_idx].get();
6854 resize_out(out, sizes, strides, options);
6855 if (!names.empty()) {
6856 namedinference::propagate_names(outputs_[output_idx], names);
6857 }
6858 // super must happen after, so that downstream can use maybe_get_output
6859 // to retrieve the output
6860 }
6861 const Tensor& maybe_get_output(int64_t output_idx) override {
6862 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
6863 }
6864 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
6865 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
6866};
6867at::Tensor & wrapper_CPU_isin_out_Scalar_Tensor_out(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) {
6868structured_isin_Scalar_Tensor_out_out op(out);
6869op.meta(element, test_elements, assume_unique, invert);
6870op.impl(element, test_elements, assume_unique, invert, op.maybe_get_output(0));
6871if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
6872return out;
6873}
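// Another run of non-structured wrappers: isnan, kthvalue.values, the
// native_layer_norm pair, nan_to_num.out and linspace.out all forward
// directly to their at::native CPU kernels, with layer_norm converting its
// SymInt normalized_shape via C10_AS_INTARRAYREF_SLOW.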
6874namespace {
6875at::Tensor wrapper_CPU__isnan(const at::Tensor & self) {
6876 // No device check
6877 // DeviceGuard omitted
6878 return at::native::isnan(self);
6879}
6880} // anonymous namespace
6881namespace {
6882::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_values_kthvalue_out(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
6883 // No device check
6884 // DeviceGuard omitted
6885 return at::native::kthvalue_out_cpu(self, k, dim, keepdim, values, indices);
6886}
6887} // anonymous namespace
6888namespace {
6889::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU__native_layer_norm(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) {
6890 // No device check
6891 // DeviceGuard omitted
6892 return at::native::layer_norm_cpu(input, C10_AS_INTARRAYREF_SLOW(normalized_shape), weight, bias, eps);
6893}
6894} // anonymous namespace
6895namespace {
6896::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU__native_layer_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
6897 // No device check
6898 // DeviceGuard omitted
6899 return at::native::layer_norm_backward_cpu(grad_out, input, C10_AS_INTARRAYREF_SLOW(normalized_shape), mean, rstd, weight, bias, output_mask);
6900}
6901} // anonymous namespace
6902namespace {
6903at::Tensor & wrapper_CPU_out_nan_to_num_out(const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf, at::Tensor & out) {
6904 // No device check
6905 // DeviceGuard omitted
6906 return at::native::nan_to_num_out(self, nan, posinf, neginf, out);
6907}
6908} // anonymous namespace
6909namespace {
6910at::Tensor & wrapper_CPU_out_linspace_out(const at::Scalar & start, const at::Scalar & end, int64_t steps, at::Tensor & out) {
6911 // No device check
6912 // DeviceGuard omitted
6913 return at::native::linspace_out(start, end, steps, out);
6914}
6915} // anonymous namespace
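// log and log10 below use the same structured functional/out=/in-place triple
// as floor and frac above. As a rough sketch of how these are reached
// (assuming the usual CPU dispatch-key registration elsewhere in this file):
//
//   at::Tensor t = at::rand({4});
//   at::Tensor y = at::log(t);   // -> wrapper_CPU_log
//   at::log_out(y, t);           // -> wrapper_CPU_log_out_out
//   t.log_();                    // -> wrapper_CPU_log_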
6916struct structured_log_out_functional final : public at::native::structured_log_out {
6917 void set_output_strided(
6918 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6919 TensorOptions options, DimnameList names
6920 ) override {
6921 outputs_[output_idx] = create_out(sizes, strides, options);
6922 if (!names.empty()) {
6923 namedinference::propagate_names(*outputs_[output_idx], names);
6924 }
6925 // super must happen after, so that downstream can use maybe_get_output
6926 // to retrieve the output
6927 at::native::structured_log_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
6928 }
6929 void set_output_raw_strided(
6930 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6931 TensorOptions options, DimnameList names
6932 ) override {
6933 outputs_[output_idx] = create_out(sizes, strides, options);
6934 if (!names.empty()) {
6935 namedinference::propagate_names(*outputs_[output_idx], names);
6936 }
6937 // super must happen after, so that downstream can use maybe_get_output
6938 // to retrieve the output
6939 at::native::structured_log_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
6940 }
6941 const Tensor& maybe_get_output(int64_t output_idx) override {
6942 return *outputs_[output_idx];
6943 }
6944 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
6945};
6946at::Tensor wrapper_CPU_log(const at::Tensor & self) {
6947structured_log_out_functional op;
6948op.meta(self);
6949op.impl(self, *op.outputs_[0]);
6950return std::move(op.outputs_[0]).take();
6951}
6952struct structured_log_out_out final : public at::native::structured_log_out {
6953 structured_log_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
6954 void set_output_strided(
6955 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6956 TensorOptions options, DimnameList names
6957 ) override {
6958 const auto& out = outputs_[output_idx].get();
6959 resize_out(out, sizes, strides, options);
6960 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
6961 if (C10_UNLIKELY(maybe_proxy.has_value())) {
6962 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
6963 }
6964 if (!names.empty()) {
6965 namedinference::propagate_names(outputs_[output_idx], names);
6966 }
6967 // super must happen after, so that downstream can use maybe_get_output
6968 // to retrieve the output
6969 at::native::structured_log_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
6970 }
6971 void set_output_raw_strided(
6972 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
6973 TensorOptions options, DimnameList names
6974 ) override {
6975 const auto& out = outputs_[output_idx].get();
6976 resize_out(out, sizes, strides, options);
6977 if (!names.empty()) {
6978 namedinference::propagate_names(outputs_[output_idx], names);
6979 }
6980 // super must happen after, so that downstream can use maybe_get_output
6981 // to retrieve the output
6982 at::native::structured_log_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
6983 }
6984 const Tensor& maybe_get_output(int64_t output_idx) override {
6985 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
6986 }
6987 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
6988 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
6989};
6990at::Tensor & wrapper_CPU_log_out_out(const at::Tensor & self, at::Tensor & out) {
6991structured_log_out_out op(out);
6992op.meta(self);
6993op.impl(self, op.maybe_get_output(0));
6994if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
6995return out;
6996}
6997struct structured_log_out_inplace final : public at::native::structured_log_out {
6998 structured_log_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
6999 void set_output_strided(
7000 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7001 TensorOptions options, DimnameList names
7002 ) override {
7003 const auto& out = outputs_[output_idx].get();
7004 check_inplace(out, sizes, options);
7005 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
7006 if (C10_UNLIKELY(maybe_proxy.has_value())) {
7007 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
7008 }
7009 if (!names.empty()) {
7010 namedinference::propagate_names(outputs_[output_idx], names);
7011 }
7012 // super must happen after, so that downstream can use maybe_get_output
7013 // to retrieve the output
7014 at::native::structured_log_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7015 }
7016 void set_output_raw_strided(
7017 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7018 TensorOptions options, DimnameList names
7019 ) override {
7020 const auto& out = outputs_[output_idx].get();
7021 check_inplace(out, sizes, options);
7022 if (!names.empty()) {
7023 namedinference::propagate_names(outputs_[output_idx], names);
7024 }
7025 // super must happen after, so that downstream can use maybe_get_output
7026 // to retrieve the output
7027 at::native::structured_log_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7028 }
7029 const Tensor& maybe_get_output(int64_t output_idx) override {
7030 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
7031 }
7032 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
7033 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
7034};
7035at::Tensor & wrapper_CPU_log_(at::Tensor & self) {
7036structured_log_out_inplace op(self);
7037op.meta(self);
7038op.impl(self, op.outputs_[0]);
7039if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
7040return self;
7041}
7042struct structured_log10_out_functional final : public at::native::structured_log10_out {
7043 void set_output_strided(
7044 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7045 TensorOptions options, DimnameList names
7046 ) override {
7047 outputs_[output_idx] = create_out(sizes, strides, options);
7048 if (!names.empty()) {
7049 namedinference::propagate_names(*outputs_[output_idx], names);
7050 }
7051 // super must happen after, so that downstream can use maybe_get_output
7052 // to retrieve the output
7053 at::native::structured_log10_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7054 }
7055 void set_output_raw_strided(
7056 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7057 TensorOptions options, DimnameList names
7058 ) override {
7059 outputs_[output_idx] = create_out(sizes, strides, options);
7060 if (!names.empty()) {
7061 namedinference::propagate_names(*outputs_[output_idx], names);
7062 }
7063 // super must happen after, so that downstream can use maybe_get_output
7064 // to retrieve the output
7065 at::native::structured_log10_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7066 }
7067 const Tensor& maybe_get_output(int64_t output_idx) override {
7068 return *outputs_[output_idx];
7069 }
7070 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
7071};
7072at::Tensor wrapper_CPU_log10(const at::Tensor & self) {
7073structured_log10_out_functional op;
7074op.meta(self);
7075op.impl(self, *op.outputs_[0]);
7076return std::move(op.outputs_[0]).take();
7077}
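// NOTE: the functional flavor above owns its outputs: set_output_* allocates a
// fresh tensor via create_out() into a c10::ExclusivelyOwned<Tensor>, and the
// wrapper releases it to the caller with std::move(op.outputs_[0]).take().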
7078struct structured_log10_out_out final : public at::native::structured_log10_out {
7079 structured_log10_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
7080 void set_output_strided(
7081 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7082 TensorOptions options, DimnameList names
7083 ) override {
7084 const auto& out = outputs_[output_idx].get();
7085 resize_out(out, sizes, strides, options);
7086 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
7087 if (C10_UNLIKELY(maybe_proxy.has_value())) {
7088 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
7089 }
7090 if (!names.empty()) {
7091 namedinference::propagate_names(outputs_[output_idx], names);
7092 }
7093 // super must happen after, so that downstream can use maybe_get_output
7094 // to retrieve the output
7095 at::native::structured_log10_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7096 }
7097 void set_output_raw_strided(
7098 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7099 TensorOptions options, DimnameList names
7100 ) override {
7101 const auto& out = outputs_[output_idx].get();
7102 resize_out(out, sizes, strides, options);
7103 if (!names.empty()) {
7104 namedinference::propagate_names(outputs_[output_idx], names);
7105 }
7106 // super must happen after, so that downstream can use maybe_get_output
7107 // to retrieve the output
7108 at::native::structured_log10_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7109 }
7110 const Tensor& maybe_get_output(int64_t output_idx) override {
7111 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
7112 }
7113 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
7114 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
7115};
7116at::Tensor & wrapper_CPU_log10_out_out(const at::Tensor & self, at::Tensor & out) {
7117structured_log10_out_out op(out);
7118op.meta(self);
7119op.impl(self, op.maybe_get_output(0));
7120if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
7121return out;
7122}
7123struct structured_log10_out_inplace final : public at::native::structured_log10_out {
7124 structured_log10_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
7125 void set_output_strided(
7126 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7127 TensorOptions options, DimnameList names
7128 ) override {
7129 const auto& out = outputs_[output_idx].get();
7130 check_inplace(out, sizes, options);
7131 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
7132 if (C10_UNLIKELY(maybe_proxy.has_value())) {
7133 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
7134 }
7135 if (!names.empty()) {
7136 namedinference::propagate_names(outputs_[output_idx], names);
7137 }
7138 // super must happen after, so that downstream can use maybe_get_output
7139 // to retrieve the output
7140 at::native::structured_log10_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7141 }
7142 void set_output_raw_strided(
7143 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7144 TensorOptions options, DimnameList names
7145 ) override {
7146 const auto& out = outputs_[output_idx].get();
7147 check_inplace(out, sizes, options);
7148 if (!names.empty()) {
7149 namedinference::propagate_names(outputs_[output_idx], names);
7150 }
7151 // super must happen after, so that downstream can use maybe_get_output
7152 // to retrieve the output
7153 at::native::structured_log10_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7154 }
7155 const Tensor& maybe_get_output(int64_t output_idx) override {
7156 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
7157 }
7158 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
7159 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
7160};
7161at::Tensor & wrapper_CPU_log10_(at::Tensor & self) {
7162structured_log10_out_inplace op(self);
7163op.meta(self);
7164op.impl(self, op.outputs_[0]);
7165if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
7166return self;
7167}
7168struct structured_log1p_out_functional final : public at::native::structured_log1p_out {
7169 void set_output_strided(
7170 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7171 TensorOptions options, DimnameList names
7172 ) override {
7173 outputs_[output_idx] = create_out(sizes, strides, options);
7174 if (!names.empty()) {
7175 namedinference::propagate_names(*outputs_[output_idx], names);
7176 }
7177 // super must happen after, so that downstream can use maybe_get_output
7178 // to retrieve the output
7179 at::native::structured_log1p_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7180 }
7181 void set_output_raw_strided(
7182 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7183 TensorOptions options, DimnameList names
7184 ) override {
7185 outputs_[output_idx] = create_out(sizes, strides, options);
7186 if (!names.empty()) {
7187 namedinference::propagate_names(*outputs_[output_idx], names);
7188 }
7189 // super must happen after, so that downstream can use maybe_get_output
7190 // to retrieve the output
7191 at::native::structured_log1p_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7192 }
7193 const Tensor& maybe_get_output(int64_t output_idx) override {
7194 return *outputs_[output_idx];
7195 }
7196 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
7197};
7198at::Tensor wrapper_CPU_log1p(const at::Tensor & self) {
7199structured_log1p_out_functional op;
7200op.meta(self);
7201op.impl(self, *op.outputs_[0]);
7202return std::move(op.outputs_[0]).take();
7203}
7204struct structured_log1p_out_out final : public at::native::structured_log1p_out {
7205 structured_log1p_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
7206 void set_output_strided(
7207 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7208 TensorOptions options, DimnameList names
7209 ) override {
7210 const auto& out = outputs_[output_idx].get();
7211 resize_out(out, sizes, strides, options);
7212 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
7213 if (C10_UNLIKELY(maybe_proxy.has_value())) {
7214 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
7215 }
7216 if (!names.empty()) {
7217 namedinference::propagate_names(outputs_[output_idx], names);
7218 }
7219 // super must happen after, so that downstream can use maybe_get_output
7220 // to retrieve the output
7221 at::native::structured_log1p_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7222 }
7223 void set_output_raw_strided(
7224 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7225 TensorOptions options, DimnameList names
7226 ) override {
7227 const auto& out = outputs_[output_idx].get();
7228 resize_out(out, sizes, strides, options);
7229 if (!names.empty()) {
7230 namedinference::propagate_names(outputs_[output_idx], names);
7231 }
7232 // super must happen after, so that downstream can use maybe_get_output
7233 // to retrieve the output
7234 at::native::structured_log1p_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7235 }
7236 const Tensor& maybe_get_output(int64_t output_idx) override {
7237 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
7238 }
7239 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
7240 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
7241};
7242at::Tensor & wrapper_CPU_log1p_out_out(const at::Tensor & self, at::Tensor & out) {
7243structured_log1p_out_out op(out);
7244op.meta(self);
7245op.impl(self, op.maybe_get_output(0));
7246if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
7247return out;
7248}
7249struct structured_log1p_out_inplace final : public at::native::structured_log1p_out {
7250 structured_log1p_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
7251 void set_output_strided(
7252 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7253 TensorOptions options, DimnameList names
7254 ) override {
7255 const auto& out = outputs_[output_idx].get();
7256 check_inplace(out, sizes, options);
7257 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
7258 if (C10_UNLIKELY(maybe_proxy.has_value())) {
7259 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
7260 }
7261 if (!names.empty()) {
7262 namedinference::propagate_names(outputs_[output_idx], names);
7263 }
7264 // super must happen after, so that downstream can use maybe_get_output
7265 // to retrieve the output
7266 at::native::structured_log1p_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7267 }
7268 void set_output_raw_strided(
7269 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7270 TensorOptions options, DimnameList names
7271 ) override {
7272 const auto& out = outputs_[output_idx].get();
7273 check_inplace(out, sizes, options);
7274 if (!names.empty()) {
7275 namedinference::propagate_names(outputs_[output_idx], names);
7276 }
7277 // super must happen after, so that downstream can use maybe_get_output
7278 // to retrieve the output
7279 at::native::structured_log1p_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7280 }
7281 const Tensor& maybe_get_output(int64_t output_idx) override {
7282 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
7283 }
7284 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
7285 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
7286};
7287at::Tensor & wrapper_CPU_log1p_(at::Tensor & self) {
7288structured_log1p_out_inplace op(self);
7289op.meta(self);
7290op.impl(self, op.outputs_[0]);
7291if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
7292return self;
7293}
7294struct structured_log2_out_functional final : public at::native::structured_log2_out {
7295 void set_output_strided(
7296 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7297 TensorOptions options, DimnameList names
7298 ) override {
7299 outputs_[output_idx] = create_out(sizes, strides, options);
7300 if (!names.empty()) {
7301 namedinference::propagate_names(*outputs_[output_idx], names);
7302 }
7303 // super must happen after, so that downstream can use maybe_get_output
7304 // to retrieve the output
7305 at::native::structured_log2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7306 }
7307 void set_output_raw_strided(
7308 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7309 TensorOptions options, DimnameList names
7310 ) override {
7311 outputs_[output_idx] = create_out(sizes, strides, options);
7312 if (!names.empty()) {
7313 namedinference::propagate_names(*outputs_[output_idx], names);
7314 }
7315 // super must happen after, so that downstream can use maybe_get_output
7316 // to retrieve the output
7317 at::native::structured_log2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7318 }
7319 const Tensor& maybe_get_output(int64_t output_idx) override {
7320 return *outputs_[output_idx];
7321 }
7322 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
7323};
7324at::Tensor wrapper_CPU_log2(const at::Tensor & self) {
7325structured_log2_out_functional op;
7326op.meta(self);
7327op.impl(self, *op.outputs_[0]);
7328return std::move(op.outputs_[0]).take();
7329}
7330struct structured_log2_out_out final : public at::native::structured_log2_out {
7331 structured_log2_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
7332 void set_output_strided(
7333 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7334 TensorOptions options, DimnameList names
7335 ) override {
7336 const auto& out = outputs_[output_idx].get();
7337 resize_out(out, sizes, strides, options);
7338 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
7339 if (C10_UNLIKELY(maybe_proxy.has_value())) {
7340 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
7341 }
7342 if (!names.empty()) {
7343 namedinference::propagate_names(outputs_[output_idx], names);
7344 }
7345 // super must happen after, so that downstream can use maybe_get_output
7346 // to retrieve the output
7347 at::native::structured_log2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7348 }
7349 void set_output_raw_strided(
7350 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7351 TensorOptions options, DimnameList names
7352 ) override {
7353 const auto& out = outputs_[output_idx].get();
7354 resize_out(out, sizes, strides, options);
7355 if (!names.empty()) {
7356 namedinference::propagate_names(outputs_[output_idx], names);
7357 }
7358 // super must happen after, so that downstream can use maybe_get_output
7359 // to retrieve the output
7360 at::native::structured_log2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7361 }
7362 const Tensor& maybe_get_output(int64_t output_idx) override {
7363 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
7364 }
7365 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
7366 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
7367};
7368at::Tensor & wrapper_CPU_log2_out_out(const at::Tensor & self, at::Tensor & out) {
7369structured_log2_out_out op(out);
7370op.meta(self);
7371op.impl(self, op.maybe_get_output(0));
7372if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
7373return out;
7374}
7375struct structured_log2_out_inplace final : public at::native::structured_log2_out {
7376 structured_log2_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
7377 void set_output_strided(
7378 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7379 TensorOptions options, DimnameList names
7380 ) override {
7381 const auto& out = outputs_[output_idx].get();
7382 check_inplace(out, sizes, options);
7383 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
7384 if (C10_UNLIKELY(maybe_proxy.has_value())) {
7385 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
7386 }
7387 if (!names.empty()) {
7388 namedinference::propagate_names(outputs_[output_idx], names);
7389 }
7390 // super must happen after, so that downstream can use maybe_get_output
7391 // to retrieve the output
7392 at::native::structured_log2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7393 }
7394 void set_output_raw_strided(
7395 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7396 TensorOptions options, DimnameList names
7397 ) override {
7398 const auto& out = outputs_[output_idx].get();
7399 check_inplace(out, sizes, options);
7400 if (!names.empty()) {
7401 namedinference::propagate_names(outputs_[output_idx], names);
7402 }
7403 // super must happen after, so that downstream can use maybe_get_output
7404 // to retrieve the output
7405 at::native::structured_log2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7406 }
7407 const Tensor& maybe_get_output(int64_t output_idx) override {
7408 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
7409 }
7410 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
7411 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
7412};
7413at::Tensor & wrapper_CPU_log2_(at::Tensor & self) {
7414structured_log2_out_inplace op(self);
7415op.meta(self);
7416op.impl(self, op.outputs_[0]);
7417if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
7418return self;
7419}
7420struct structured_logaddexp_out_functional final : public at::native::structured_logaddexp_out {
7421 void set_output_strided(
7422 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7423 TensorOptions options, DimnameList names
7424 ) override {
7425 outputs_[output_idx] = create_out(sizes, strides, options);
7426 if (!names.empty()) {
7427 namedinference::propagate_names(*outputs_[output_idx], names);
7428 }
7429 // super must happen after, so that downstream can use maybe_get_output
7430 // to retrieve the output
7431 at::native::structured_logaddexp_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7432 }
7433 void set_output_raw_strided(
7434 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7435 TensorOptions options, DimnameList names
7436 ) override {
7437 outputs_[output_idx] = create_out(sizes, strides, options);
7438 if (!names.empty()) {
7439 namedinference::propagate_names(*outputs_[output_idx], names);
7440 }
7441 // super must happen after, so that downstream can use maybe_get_output
7442 // to retrieve the output
7443 at::native::structured_logaddexp_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7444 }
7445 const Tensor& maybe_get_output(int64_t output_idx) override {
7446 return *outputs_[output_idx];
7447 }
7448 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
7449};
7450at::Tensor wrapper_CPU_logaddexp(const at::Tensor & self, const at::Tensor & other) {
7451structured_logaddexp_out_functional op;
7452op.meta(self, other);
7453op.impl(self, other, *op.outputs_[0]);
7454return std::move(op.outputs_[0]).take();
7455}
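// NOTE: binary structured ops such as logaddexp follow the same pattern as the
// unary ops above; only the arity of meta()/impl() changes (self, other, out).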
7456struct structured_logaddexp_out_out final : public at::native::structured_logaddexp_out {
7457 structured_logaddexp_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
7458 void set_output_strided(
7459 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7460 TensorOptions options, DimnameList names
7461 ) override {
7462 const auto& out = outputs_[output_idx].get();
7463 resize_out(out, sizes, strides, options);
7464 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
7465 if (C10_UNLIKELY(maybe_proxy.has_value())) {
7466 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
7467 }
7468 if (!names.empty()) {
7469 namedinference::propagate_names(outputs_[output_idx], names);
7470 }
7471 // super must happen after, so that downstream can use maybe_get_output
7472 // to retrieve the output
7473 at::native::structured_logaddexp_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7474 }
7475 void set_output_raw_strided(
7476 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7477 TensorOptions options, DimnameList names
7478 ) override {
7479 const auto& out = outputs_[output_idx].get();
7480 resize_out(out, sizes, strides, options);
7481 if (!names.empty()) {
7482 namedinference::propagate_names(outputs_[output_idx], names);
7483 }
7484 // super must happen after, so that downstream can use maybe_get_output
7485 // to retrieve the output
7486 at::native::structured_logaddexp_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7487 }
7488 const Tensor& maybe_get_output(int64_t output_idx) override {
7489 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
7490 }
7491 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
7492 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
7493};
7494at::Tensor & wrapper_CPU_logaddexp_out_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
7495structured_logaddexp_out_out op(out);
7496op.meta(self, other);
7497op.impl(self, other, op.maybe_get_output(0));
7498if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
7499return out;
7500}
7501struct structured_logaddexp2_out_functional final : public at::native::structured_logaddexp2_out {
7502 void set_output_strided(
7503 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7504 TensorOptions options, DimnameList names
7505 ) override {
7506 outputs_[output_idx] = create_out(sizes, strides, options);
7507 if (!names.empty()) {
7508 namedinference::propagate_names(*outputs_[output_idx], names);
7509 }
7510 // super must happen after, so that downstream can use maybe_get_output
7511 // to retrieve the output
7512 at::native::structured_logaddexp2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7513 }
7514 void set_output_raw_strided(
7515 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7516 TensorOptions options, DimnameList names
7517 ) override {
7518 outputs_[output_idx] = create_out(sizes, strides, options);
7519 if (!names.empty()) {
7520 namedinference::propagate_names(*outputs_[output_idx], names);
7521 }
7522 // super must happen after, so that downstream can use maybe_get_output
7523 // to retrieve the output
7524 at::native::structured_logaddexp2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7525 }
7526 const Tensor& maybe_get_output(int64_t output_idx) override {
7527 return *outputs_[output_idx];
7528 }
7529 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
7530};
7531at::Tensor wrapper_CPU_logaddexp2(const at::Tensor & self, const at::Tensor & other) {
7532structured_logaddexp2_out_functional op;
7533op.meta(self, other);
7534op.impl(self, other, *op.outputs_[0]);
7535return std::move(op.outputs_[0]).take();
7536}
7537struct structured_logaddexp2_out_out final : public at::native::structured_logaddexp2_out {
7538 structured_logaddexp2_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
7539 void set_output_strided(
7540 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7541 TensorOptions options, DimnameList names
7542 ) override {
7543 const auto& out = outputs_[output_idx].get();
7544 resize_out(out, sizes, strides, options);
7545 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
7546 if (C10_UNLIKELY(maybe_proxy.has_value())) {
7547 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
7548 }
7549 if (!names.empty()) {
7550 namedinference::propagate_names(outputs_[output_idx], names);
7551 }
7552 // super must happen after, so that downstream can use maybe_get_output
7553 // to retrieve the output
7554 at::native::structured_logaddexp2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7555 }
7556 void set_output_raw_strided(
7557 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7558 TensorOptions options, DimnameList names
7559 ) override {
7560 const auto& out = outputs_[output_idx].get();
7561 resize_out(out, sizes, strides, options);
7562 if (!names.empty()) {
7563 namedinference::propagate_names(outputs_[output_idx], names);
7564 }
7565 // super must happen after, so that downstream can use maybe_get_output
7566 // to retrieve the output
7567 at::native::structured_logaddexp2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7568 }
7569 const Tensor& maybe_get_output(int64_t output_idx) override {
7570 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
7571 }
7572 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
7573 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
7574};
7575at::Tensor & wrapper_CPU_logaddexp2_out_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
7576structured_logaddexp2_out_out op(out);
7577op.meta(self, other);
7578op.impl(self, other, op.maybe_get_output(0));
7579if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
7580return out;
7581}
7582struct structured_xlogy_out_functional final : public at::native::structured_xlogy_out {
7583 void set_output_strided(
7584 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7585 TensorOptions options, DimnameList names
7586 ) override {
7587 outputs_[output_idx] = create_out(sizes, strides, options);
7588 if (!names.empty()) {
7589 namedinference::propagate_names(*outputs_[output_idx], names);
7590 }
7591 // super must happen after, so that downstream can use maybe_get_output
7592 // to retrieve the output
7593 at::native::structured_xlogy_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7594 }
7595 void set_output_raw_strided(
7596 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7597 TensorOptions options, DimnameList names
7598 ) override {
7599 outputs_[output_idx] = create_out(sizes, strides, options);
7600 if (!names.empty()) {
7601 namedinference::propagate_names(*outputs_[output_idx], names);
7602 }
7603 // super must happen after, so that downstream can use maybe_get_output
7604 // to retrieve the output
7605 at::native::structured_xlogy_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7606 }
7607 const Tensor& maybe_get_output(int64_t output_idx) override {
7608 return *outputs_[output_idx];
7609 }
7610 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
7611};
7612at::Tensor wrapper_CPU_xlogy_Tensor(const at::Tensor & self, const at::Tensor & other) {
7613structured_xlogy_out_functional op;
7614op.meta(self, other);
7615op.impl(self, other, *op.outputs_[0]);
7616return std::move(op.outputs_[0]).take();
7617}
7618struct structured_xlogy_out_out final : public at::native::structured_xlogy_out {
7619 structured_xlogy_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
7620 void set_output_strided(
7621 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7622 TensorOptions options, DimnameList names
7623 ) override {
7624 const auto& out = outputs_[output_idx].get();
7625 resize_out(out, sizes, strides, options);
7626 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
7627 if (C10_UNLIKELY(maybe_proxy.has_value())) {
7628 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
7629 }
7630 if (!names.empty()) {
7631 namedinference::propagate_names(outputs_[output_idx], names);
7632 }
7633 // super must happen after, so that downstream can use maybe_get_output
7634 // to retrieve the output
7635 at::native::structured_xlogy_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7636 }
7637 void set_output_raw_strided(
7638 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7639 TensorOptions options, DimnameList names
7640 ) override {
7641 const auto& out = outputs_[output_idx].get();
7642 resize_out(out, sizes, strides, options);
7643 if (!names.empty()) {
7644 namedinference::propagate_names(outputs_[output_idx], names);
7645 }
7646 // super must happen after, so that downstream can use maybe_get_output
7647 // to retrieve the output
7648 at::native::structured_xlogy_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7649 }
7650 const Tensor& maybe_get_output(int64_t output_idx) override {
7651 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
7652 }
7653 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
7654 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
7655};
7656at::Tensor & wrapper_CPU_xlogy_out_OutTensor(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
7657structured_xlogy_out_out op(out);
7658op.meta(self, other);
7659op.impl(self, other, op.maybe_get_output(0));
7660if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
7661return out;
7662}
7663struct structured_xlogy_out_inplace final : public at::native::structured_xlogy_out {
7664 structured_xlogy_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
7665 void set_output_strided(
7666 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7667 TensorOptions options, DimnameList names
7668 ) override {
7669 const auto& out = outputs_[output_idx].get();
7670 check_inplace(out, sizes, options);
7671 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
7672 if (C10_UNLIKELY(maybe_proxy.has_value())) {
7673 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
7674 }
7675 if (!names.empty()) {
7676 namedinference::propagate_names(outputs_[output_idx], names);
7677 }
7678 // super must happen after, so that downstream can use maybe_get_output
7679 // to retrieve the output
7680 at::native::structured_xlogy_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7681 }
7682 void set_output_raw_strided(
7683 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7684 TensorOptions options, DimnameList names
7685 ) override {
7686 const auto& out = outputs_[output_idx].get();
7687 check_inplace(out, sizes, options);
7688 if (!names.empty()) {
7689 namedinference::propagate_names(outputs_[output_idx], names);
7690 }
7691 // super must happen after, so that downstream can use maybe_get_output
7692 // to retrieve the output
7693 at::native::structured_xlogy_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
7694 }
7695 const Tensor& maybe_get_output(int64_t output_idx) override {
7696 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
7697 }
7698 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
7699 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
7700};
7701at::Tensor & wrapper_CPU_xlogy__Tensor(at::Tensor & self, const at::Tensor & other) {
7702structured_xlogy_out_inplace op(self);
7703op.meta(self, other);
7704op.impl(self, other, op.outputs_[0]);
7705if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
7706return self;
7707}
7708namespace {
7709at::Tensor & wrapper_CPU_out_logspace_out(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out) {
7710 // No device check
7711 // DeviceGuard omitted
7712 return at::native::logspace_out(start, end, steps, base, out);
7713}
7714} // anonymous namespace
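// NOTE: ops that are not structured (here logspace.out) get a thin wrapper in an
// anonymous namespace that forwards directly to the at::native kernel; as the
// comments inside it state, no device check or DeviceGuard is emitted for it.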
7715struct structured_log_softmax_cpu_out_functional final : public at::native::structured_log_softmax_cpu_out {
7716 void set_output_strided(
7717 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7718 TensorOptions options, DimnameList names
7719 ) override {
7720 outputs_[output_idx] = create_out(sizes, strides, options);
7721 if (!names.empty()) {
7722 namedinference::propagate_names(*outputs_[output_idx], names);
7723 }
7724 // super must happen after, so that downstream can use maybe_get_output
7725 // to retrieve the output
7726 }
7727 void set_output_raw_strided(
7728 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7729 TensorOptions options, DimnameList names
7730 ) override {
7731 outputs_[output_idx] = create_out(sizes, strides, options);
7732 if (!names.empty()) {
7733 namedinference::propagate_names(*outputs_[output_idx], names);
7734 }
7735 // super must happen after, so that downstream can use maybe_get_output
7736 // to retrieve the output
7737 }
7738 const Tensor& maybe_get_output(int64_t output_idx) override {
7739 return *outputs_[output_idx];
7740 }
7741 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
7742};
7743at::Tensor wrapper_CPU__log_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
7744structured_log_softmax_cpu_out_functional op;
7745op.meta(self, dim, half_to_float);
7746op.impl(self, dim, half_to_float, *op.outputs_[0]);
7747return std::move(op.outputs_[0]).take();
7748}
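// NOTE: unlike the TensorIterator-backed ops above, the log_softmax structs end
// set_output_strided()/set_output_raw_strided() without forwarding to a parent
// implementation after the "super must happen after" comment; presumably the
// underlying meta class defines no set_output to call here. The same holds for
// the reduction ops further below (aminmax, max, amax, mean, min, amin).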
7749struct structured_log_softmax_cpu_out_out final : public at::native::structured_log_softmax_cpu_out {
7750 structured_log_softmax_cpu_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
7751 void set_output_strided(
7752 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7753 TensorOptions options, DimnameList names
7754 ) override {
7755 const auto& out = outputs_[output_idx].get();
7756 resize_out(out, sizes, strides, options);
7757 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
7758 if (C10_UNLIKELY(maybe_proxy.has_value())) {
7759 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
7760 }
7761 if (!names.empty()) {
7762 namedinference::propagate_names(outputs_[output_idx], names);
7763 }
7764 // super must happen after, so that downstream can use maybe_get_output
7765 // to retrieve the output
7766 }
7767 void set_output_raw_strided(
7768 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7769 TensorOptions options, DimnameList names
7770 ) override {
7771 const auto& out = outputs_[output_idx].get();
7772 resize_out(out, sizes, strides, options);
7773 if (!names.empty()) {
7774 namedinference::propagate_names(outputs_[output_idx], names);
7775 }
7776 // super must happen after, so that downstream can use maybe_get_output
7777 // to retrieve the output
7778 }
7779 const Tensor& maybe_get_output(int64_t output_idx) override {
7780 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
7781 }
7782 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
7783 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
7784};
7785at::Tensor & wrapper_CPU__log_softmax_out_out(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
7786structured_log_softmax_cpu_out_out op(out);
7787op.meta(self, dim, half_to_float);
7788op.impl(self, dim, half_to_float, op.maybe_get_output(0));
7789if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
7790return out;
7791}
7792struct structured_log_softmax_backward_cpu_out_functional final : public at::native::structured_log_softmax_backward_cpu_out {
7793 void set_output_strided(
7794 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7795 TensorOptions options, DimnameList names
7796 ) override {
7797 outputs_[output_idx] = create_out(sizes, strides, options);
7798 if (!names.empty()) {
7799 namedinference::propagate_names(*outputs_[output_idx], names);
7800 }
7801 // super must happen after, so that downstream can use maybe_get_output
7802 // to retrieve the output
7803 }
7804 void set_output_raw_strided(
7805 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7806 TensorOptions options, DimnameList names
7807 ) override {
7808 outputs_[output_idx] = create_out(sizes, strides, options);
7809 if (!names.empty()) {
7810 namedinference::propagate_names(*outputs_[output_idx], names);
7811 }
7812 // super must happen after, so that downstream can use maybe_get_output
7813 // to retrieve the output
7814 }
7815 const Tensor& maybe_get_output(int64_t output_idx) override {
7816 return *outputs_[output_idx];
7817 }
7818 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
7819};
7820at::Tensor wrapper_CPU__log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
7821structured_log_softmax_backward_cpu_out_functional op;
7822op.meta(grad_output, output, dim, input_dtype);
7823op.impl(grad_output, output, dim, input_dtype, *op.outputs_[0]);
7824return std::move(op.outputs_[0]).take();
7825}
7826struct structured_log_softmax_backward_cpu_out_out final : public at::native::structured_log_softmax_backward_cpu_out {
7827 structured_log_softmax_backward_cpu_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
7828 void set_output_strided(
7829 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7830 TensorOptions options, DimnameList names
7831 ) override {
7832 const auto& out = outputs_[output_idx].get();
7833 resize_out(out, sizes, strides, options);
7834 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
7835 if (C10_UNLIKELY(maybe_proxy.has_value())) {
7836 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
7837 }
7838 if (!names.empty()) {
7839 namedinference::propagate_names(outputs_[output_idx], names);
7840 }
7841 // super must happen after, so that downstream can use maybe_get_output
7842 // to retrieve the output
7843 }
7844 void set_output_raw_strided(
7845 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7846 TensorOptions options, DimnameList names
7847 ) override {
7848 const auto& out = outputs_[output_idx].get();
7849 resize_out(out, sizes, strides, options);
7850 if (!names.empty()) {
7851 namedinference::propagate_names(outputs_[output_idx], names);
7852 }
7853 // super must happen after, so that downstream can use maybe_get_output
7854 // to retrieve the output
7855 }
7856 const Tensor& maybe_get_output(int64_t output_idx) override {
7857 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
7858 }
7859 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
7860 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
7861};
7862at::Tensor & wrapper_CPU__log_softmax_backward_data_out_out(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & out) {
7863structured_log_softmax_backward_cpu_out_out op(out);
7864op.meta(grad_output, output, dim, input_dtype);
7865op.impl(grad_output, output, dim, input_dtype, op.maybe_get_output(0));
7866if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
7867return out;
7868}
7869namespace {
7870at::Tensor wrapper_CPU___logcumsumexp(const at::Tensor & self, int64_t dim) {
7871 // No device check
7872 // DeviceGuard omitted
7873 return at::native::_logcumsumexp_cpu(self, dim);
7874}
7875} // anonymous namespace
7876namespace {
7877at::Tensor & wrapper_CPU_out__logcumsumexp_out(const at::Tensor & self, int64_t dim, at::Tensor & out) {
7878 // No device check
7879 // DeviceGuard omitted
7880 return at::native::_logcumsumexp_out_cpu(self, dim, out);
7881}
7882} // anonymous namespace
7883namespace {
7884::std::tuple<at::Tensor,at::Tensor> wrapper_CPU___aminmax(const at::Tensor & self) {
7885 // No device check
7886 // DeviceGuard omitted
7887 return at::native::_aminmax_all(self);
7888}
7889} // anonymous namespace
7890namespace {
7891::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_dim__aminmax(const at::Tensor & self, int64_t dim, bool keepdim) {
7892 // No device check
7893 // DeviceGuard omitted
7894 return at::native::_aminmax(self, dim, keepdim);
7895}
7896} // anonymous namespace
7897struct structured_aminmax_out_functional final : public at::native::structured_aminmax_out {
7898 void set_output_strided(
7899 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7900 TensorOptions options, DimnameList names
7901 ) override {
7902 outputs_[output_idx] = create_out(sizes, strides, options);
7903 if (!names.empty()) {
7904 namedinference::propagate_names(*outputs_[output_idx], names);
7905 }
7906 // super must happen after, so that downstream can use maybe_get_output
7907 // to retrieve the output
7908 }
7909 void set_output_raw_strided(
7910 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7911 TensorOptions options, DimnameList names
7912 ) override {
7913 outputs_[output_idx] = create_out(sizes, strides, options);
7914 if (!names.empty()) {
7915 namedinference::propagate_names(*outputs_[output_idx], names);
7916 }
7917 // super must happen after, so that downstream can use maybe_get_output
7918 // to retrieve the output
7919 }
7920 const Tensor& maybe_get_output(int64_t output_idx) override {
7921 return *outputs_[output_idx];
7922 }
7923 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
7924};
7925::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_aminmax(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {
7926structured_aminmax_out_functional op;
7927op.meta(self, dim, keepdim);
7928op.impl(self, dim, keepdim, *op.outputs_[0], *op.outputs_[1]);
7929return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
7930}
7931struct structured_aminmax_out_out final : public at::native::structured_aminmax_out {
7932 structured_aminmax_out_out(Tensor& out0, Tensor& out1) : outputs_{ std::ref(out0), std::ref(out1) } {}
7933 void set_output_strided(
7934 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7935 TensorOptions options, DimnameList names
7936 ) override {
7937 const auto& out = outputs_[output_idx].get();
7938 resize_out(out, sizes, strides, options);
7939 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
7940 if (C10_UNLIKELY(maybe_proxy.has_value())) {
7941 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
7942 }
7943 if (!names.empty()) {
7944 namedinference::propagate_names(outputs_[output_idx], names);
7945 }
7946 // super must happen after, so that downstream can use maybe_get_output
7947 // to retrieve the output
7948 }
7949 void set_output_raw_strided(
7950 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7951 TensorOptions options, DimnameList names
7952 ) override {
7953 const auto& out = outputs_[output_idx].get();
7954 resize_out(out, sizes, strides, options);
7955 if (!names.empty()) {
7956 namedinference::propagate_names(outputs_[output_idx], names);
7957 }
7958 // super must happen after, so that downstream can use maybe_get_output
7959 // to retrieve the output
7960 }
7961 const Tensor& maybe_get_output(int64_t output_idx) override {
7962 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
7963 }
7964 std::array<std::reference_wrapper<Tensor>, 2> outputs_;
7965 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 2> proxy_outputs_;
7966};
7967::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_aminmax_out_out(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, at::Tensor & min, at::Tensor & max) {
7968structured_aminmax_out_out op(min, max);
7969op.meta(self, dim, keepdim);
7970op.impl(self, dim, keepdim, op.maybe_get_output(0), op.maybe_get_output(1));
7971if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
7972if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(**op.proxy_outputs_[1]);
7973return std::forward_as_tuple(min, max);
7974}
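// NOTE: aminmax is a two-output reduction, so outputs_ / proxy_outputs_ are sized
// 2; each proxy is copied back into its destination tensor independently and the
// wrapper returns std::forward_as_tuple(min, max).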
7975namespace {
7976at::Tensor wrapper_CPU___compute_linear_combination(const at::Tensor & input, const at::Tensor & coefficients) {
7977 // No device check
7978 // DeviceGuard omitted
7979 return at::native::_compute_linear_combination(input, coefficients);
7980}
7981} // anonymous namespace
7982namespace {
7983at::Tensor & wrapper_CPU_out__compute_linear_combination_out(const at::Tensor & input, const at::Tensor & coefficients, at::Tensor & out) {
7984 // No device check
7985 // DeviceGuard omitted
7986 return at::native::_compute_linear_combination_out(input, coefficients, out);
7987}
7988} // anonymous namespace
7989struct structured_max_out_functional final : public at::native::structured_max_out {
7990 void set_output_strided(
7991 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
7992 TensorOptions options, DimnameList names
7993 ) override {
7994 outputs_[output_idx] = create_out(sizes, strides, options);
7995 if (!names.empty()) {
7996 namedinference::propagate_names(*outputs_[output_idx], names);
7997 }
7998 // super must happen after, so that downstream can use maybe_get_output
7999 // to retrieve the output
8000 }
8001 void set_output_raw_strided(
8002 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8003 TensorOptions options, DimnameList names
8004 ) override {
8005 outputs_[output_idx] = create_out(sizes, strides, options);
8006 if (!names.empty()) {
8007 namedinference::propagate_names(*outputs_[output_idx], names);
8008 }
8009 // super must happen after, so that downstream can use maybe_get_output
8010 // to retrieve the output
8011 }
8012 const Tensor& maybe_get_output(int64_t output_idx) override {
8013 return *outputs_[output_idx];
8014 }
8015 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
8016};
8017::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_max_dim(const at::Tensor & self, int64_t dim, bool keepdim) {
8018structured_max_out_functional op;
8019auto precompute = op.meta(self, dim, keepdim);
8020(void)precompute;
8021op.impl(self, precompute.dim, keepdim, *op.outputs_[0], *op.outputs_[1]);
8022return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
8023}
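// NOTE: max.dim uses the precompute variant of the pattern: op.meta() returns a
// struct of precomputed values (here the canonicalized `dim`), which is then fed
// to op.impl(); the (void)precompute cast appears to be emitted only to silence
// unused-variable warnings in wrappers that consume none of the precomputed fields.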
8024struct structured_max_out_out final : public at::native::structured_max_out {
8025 structured_max_out_out(Tensor& out0, Tensor& out1) : outputs_{ std::ref(out0), std::ref(out1) } {}
8026 void set_output_strided(
8027 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8028 TensorOptions options, DimnameList names
8029 ) override {
8030 const auto& out = outputs_[output_idx].get();
8031 resize_out(out, sizes, strides, options);
8032 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
8033 if (C10_UNLIKELY(maybe_proxy.has_value())) {
8034 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
8035 }
8036 if (!names.empty()) {
8037 namedinference::propagate_names(outputs_[output_idx], names);
8038 }
8039 // super must happen after, so that downstream can use maybe_get_output
8040 // to retrieve the output
8041 }
8042 void set_output_raw_strided(
8043 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8044 TensorOptions options, DimnameList names
8045 ) override {
8046 const auto& out = outputs_[output_idx].get();
8047 resize_out(out, sizes, strides, options);
8048 if (!names.empty()) {
8049 namedinference::propagate_names(outputs_[output_idx], names);
8050 }
8051 // super must happen after, so that downstream can use maybe_get_output
8052 // to retrieve the output
8053 }
8054 const Tensor& maybe_get_output(int64_t output_idx) override {
8055 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
8056 }
8057 std::array<std::reference_wrapper<Tensor>, 2> outputs_;
8058 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 2> proxy_outputs_;
8059};
8060::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_max_out_dim_max(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) {
8061structured_max_out_out op(max, max_values);
8062auto precompute = op.meta(self, dim, keepdim);
8063(void)precompute;
8064op.impl(self, precompute.dim, keepdim, op.maybe_get_output(0), op.maybe_get_output(1));
8065if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
8066if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(**op.proxy_outputs_[1]);
8067return std::forward_as_tuple(max, max_values);
8068}
8069struct structured_amax_out_functional final : public at::native::structured_amax_out {
8070 void set_output_strided(
8071 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8072 TensorOptions options, DimnameList names
8073 ) override {
8074 outputs_[output_idx] = create_out(sizes, strides, options);
8075 if (!names.empty()) {
8076 namedinference::propagate_names(*outputs_[output_idx], names);
8077 }
8078 // super must happen after, so that downstream can use maybe_get_output
8079 // to retrieve the output
8080 }
8081 void set_output_raw_strided(
8082 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8083 TensorOptions options, DimnameList names
8084 ) override {
8085 outputs_[output_idx] = create_out(sizes, strides, options);
8086 if (!names.empty()) {
8087 namedinference::propagate_names(*outputs_[output_idx], names);
8088 }
8089 // super must happen after, so that downstream can use maybe_get_output
8090 // to retrieve the output
8091 }
8092 const Tensor& maybe_get_output(int64_t output_idx) override {
8093 return *outputs_[output_idx];
8094 }
8095 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
8096};
8097at::Tensor wrapper_CPU_amax(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
8098structured_amax_out_functional op;
8099op.meta(self, dim, keepdim);
8100op.impl(self, dim, keepdim, *op.outputs_[0]);
8101return std::move(op.outputs_[0]).take();
8102}
8103struct structured_amax_out_out final : public at::native::structured_amax_out {
8104 structured_amax_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
8105 void set_output_strided(
8106 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8107 TensorOptions options, DimnameList names
8108 ) override {
8109 const auto& out = outputs_[output_idx].get();
8110 resize_out(out, sizes, strides, options);
8111 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
8112 if (C10_UNLIKELY(maybe_proxy.has_value())) {
8113 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
8114 }
8115 if (!names.empty()) {
8116 namedinference::propagate_names(outputs_[output_idx], names);
8117 }
8118 // super must happen after, so that downstream can use maybe_get_output
8119 // to retrieve the output
8120 }
8121 void set_output_raw_strided(
8122 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8123 TensorOptions options, DimnameList names
8124 ) override {
8125 const auto& out = outputs_[output_idx].get();
8126 resize_out(out, sizes, strides, options);
8127 if (!names.empty()) {
8128 namedinference::propagate_names(outputs_[output_idx], names);
8129 }
8130 // super must happen after, so that downstream can use maybe_get_output
8131 // to retrieve the output
8132 }
8133 const Tensor& maybe_get_output(int64_t output_idx) override {
8134 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
8135 }
8136 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
8137 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
8138};
8139at::Tensor & wrapper_CPU_amax_out_out(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
8140structured_amax_out_out op(out);
8141op.meta(self, dim, keepdim);
8142op.impl(self, dim, keepdim, op.maybe_get_output(0));
8143if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
8144return out;
8145}
8146struct structured_mean_out_functional final : public at::native::structured_mean_out {
8147 void set_output_strided(
8148 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8149 TensorOptions options, DimnameList names
8150 ) override {
8151 outputs_[output_idx] = create_out(sizes, strides, options);
8152 if (!names.empty()) {
8153 namedinference::propagate_names(*outputs_[output_idx], names);
8154 }
8155 // super must happen after, so that downstream can use maybe_get_output
8156 // to retrieve the output
8157 }
8158 void set_output_raw_strided(
8159 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8160 TensorOptions options, DimnameList names
8161 ) override {
8162 outputs_[output_idx] = create_out(sizes, strides, options);
8163 if (!names.empty()) {
8164 namedinference::propagate_names(*outputs_[output_idx], names);
8165 }
8166 // super must happen after, so that downstream can use maybe_get_output
8167 // to retrieve the output
8168 }
8169 const Tensor& maybe_get_output(int64_t output_idx) override {
8170 return *outputs_[output_idx];
8171 }
8172 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
8173};
8174at::Tensor wrapper_CPU_mean_dim(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
8175structured_mean_out_functional op;
8176op.meta(self, dim, keepdim, dtype);
8177op.impl(self, dim, keepdim, dtype, *op.outputs_[0]);
8178return std::move(op.outputs_[0]).take();
8179}
8180struct structured_mean_out_out final : public at::native::structured_mean_out {
8181 structured_mean_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
8182 void set_output_strided(
8183 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8184 TensorOptions options, DimnameList names
8185 ) override {
8186 const auto& out = outputs_[output_idx].get();
8187 resize_out(out, sizes, strides, options);
8188 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
8189 if (C10_UNLIKELY(maybe_proxy.has_value())) {
8190 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
8191 }
8192 if (!names.empty()) {
8193 namedinference::propagate_names(outputs_[output_idx], names);
8194 }
8195 // super must happen after, so that downstream can use maybe_get_output
8196 // to retrieve the output
8197 }
8198 void set_output_raw_strided(
8199 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8200 TensorOptions options, DimnameList names
8201 ) override {
8202 const auto& out = outputs_[output_idx].get();
8203 resize_out(out, sizes, strides, options);
8204 if (!names.empty()) {
8205 namedinference::propagate_names(outputs_[output_idx], names);
8206 }
8207 // super must happen after, so that downstream can use maybe_get_output
8208 // to retrieve the output
8209 }
8210 const Tensor& maybe_get_output(int64_t output_idx) override {
8211 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
8212 }
8213 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
8214 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
8215};
8216at::Tensor & wrapper_CPU_mean_out_out(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
8217structured_mean_out_out op(out);
8218op.meta(self, dim, keepdim, dtype);
8219op.impl(self, dim, keepdim, dtype, op.maybe_get_output(0));
8220if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
8221return out;
8222}
8223namespace {
8224at::Tensor wrapper_CPU__median(const at::Tensor & self) {
8225 // No device check
8226 // DeviceGuard omitted
8227 return at::native::median_cpu(self);
8228}
8229} // anonymous namespace
8230namespace {
8231::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_dim_values_median_out(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
8232 // No device check
8233 // DeviceGuard omitted
8234 return at::native::median_out_cpu(self, dim, keepdim, values, indices);
8235}
8236} // anonymous namespace
8237namespace {
8238at::Tensor wrapper_CPU__nanmedian(const at::Tensor & self) {
8239 // No device check
8240 // DeviceGuard omitted
8241 return at::native::nanmedian_cpu(self);
8242}
8243} // anonymous namespace
8244namespace {
8245::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_dim_values_nanmedian_out(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
8246 // No device check
8247 // DeviceGuard omitted
8248 return at::native::nanmedian_out_cpu(self, dim, keepdim, values, indices);
8249}
8250} // anonymous namespace
8251struct structured_min_out_functional final : public at::native::structured_min_out {
8252 void set_output_strided(
8253 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8254 TensorOptions options, DimnameList names
8255 ) override {
8256 outputs_[output_idx] = create_out(sizes, strides, options);
8257 if (!names.empty()) {
8258 namedinference::propagate_names(*outputs_[output_idx], names);
8259 }
8260 // super must happen after, so that downstream can use maybe_get_output
8261 // to retrieve the output
8262 }
8263 void set_output_raw_strided(
8264 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8265 TensorOptions options, DimnameList names
8266 ) override {
8267 outputs_[output_idx] = create_out(sizes, strides, options);
8268 if (!names.empty()) {
8269 namedinference::propagate_names(*outputs_[output_idx], names);
8270 }
8271 // super must happen after, so that downstream can use maybe_get_output
8272 // to retrieve the output
8273 }
8274 const Tensor& maybe_get_output(int64_t output_idx) override {
8275 return *outputs_[output_idx];
8276 }
8277 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
8278};
8279::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_min_dim(const at::Tensor & self, int64_t dim, bool keepdim) {
8280structured_min_out_functional op;
8281auto precompute = op.meta(self, dim, keepdim);
8282(void)precompute;
8283op.impl(self, precompute.dim, keepdim, *op.outputs_[0], *op.outputs_[1]);
8284return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
8285}
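// NOTE: meta() for min.dim returns a precompute struct; the canonicalized
// precompute.dim is what gets forwarded to impl(). The `(void)precompute;`
// statement is emitted unconditionally and only silences unused-variable
// warnings for ops that do not consume the struct. Illustrative use of the
// functional wrapper (hypothetical tensor `x`):
//
//   auto [values, indices] = at::min(x, /*dim=*/0, /*keepdim=*/false);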
8286struct structured_min_out_out final : public at::native::structured_min_out {
8287 structured_min_out_out(Tensor& out0, Tensor& out1) : outputs_{ std::ref(out0), std::ref(out1) } {}
8288 void set_output_strided(
8289 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8290 TensorOptions options, DimnameList names
8291 ) override {
8292 const auto& out = outputs_[output_idx].get();
8293 resize_out(out, sizes, strides, options);
8294 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
8295 if (C10_UNLIKELY(maybe_proxy.has_value())) {
8296 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
8297 }
8298 if (!names.empty()) {
8299 namedinference::propagate_names(outputs_[output_idx], names);
8300 }
8301 // super must happen after, so that downstream can use maybe_get_output
8302 // to retrieve the output
8303 }
8304 void set_output_raw_strided(
8305 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8306 TensorOptions options, DimnameList names
8307 ) override {
8308 const auto& out = outputs_[output_idx].get();
8309 resize_out(out, sizes, strides, options);
8310 if (!names.empty()) {
8311 namedinference::propagate_names(outputs_[output_idx], names);
8312 }
8313 // super must happen after, so that downstream can use maybe_get_output
8314 // to retrieve the output
8315 }
8316 const Tensor& maybe_get_output(int64_t output_idx) override {
8317 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
8318 }
8319 std::array<std::reference_wrapper<Tensor>, 2> outputs_;
8320 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 2> proxy_outputs_;
8321};
8322::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_min_out_dim_min(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices) {
8323structured_min_out_out op(min, min_indices);
8324auto precompute = op.meta(self, dim, keepdim);
8325(void)precompute;
8326op.impl(self, precompute.dim, keepdim, op.maybe_get_output(0), op.maybe_get_output(1));
8327if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
8328if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(**op.proxy_outputs_[1]);
8329return std::forward_as_tuple(min, min_indices);
8330}
8331struct structured_amin_out_functional final : public at::native::structured_amin_out {
8332 void set_output_strided(
8333 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8334 TensorOptions options, DimnameList names
8335 ) override {
8336 outputs_[output_idx] = create_out(sizes, strides, options);
8337 if (!names.empty()) {
8338 namedinference::propagate_names(*outputs_[output_idx], names);
8339 }
8340 // super must happen after, so that downstream can use maybe_get_output
8341 // to retrieve the output
8342 }
8343 void set_output_raw_strided(
8344 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8345 TensorOptions options, DimnameList names
8346 ) override {
8347 outputs_[output_idx] = create_out(sizes, strides, options);
8348 if (!names.empty()) {
8349 namedinference::propagate_names(*outputs_[output_idx], names);
8350 }
8351 // super must happen after, so that downstream can use maybe_get_output
8352 // to retrieve the output
8353 }
8354 const Tensor& maybe_get_output(int64_t output_idx) override {
8355 return *outputs_[output_idx];
8356 }
8357 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
8358};
8359at::Tensor wrapper_CPU_amin(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
8360structured_amin_out_functional op;
8361op.meta(self, dim, keepdim);
8362op.impl(self, dim, keepdim, *op.outputs_[0]);
8363return std::move(op.outputs_[0]).take();
8364}
8365struct structured_amin_out_out final : public at::native::structured_amin_out {
8366 structured_amin_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
8367 void set_output_strided(
8368 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8369 TensorOptions options, DimnameList names
8370 ) override {
8371 const auto& out = outputs_[output_idx].get();
8372 resize_out(out, sizes, strides, options);
8373 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
8374 if (C10_UNLIKELY(maybe_proxy.has_value())) {
8375 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
8376 }
8377 if (!names.empty()) {
8378 namedinference::propagate_names(outputs_[output_idx], names);
8379 }
8380 // super must happen after, so that downstream can use maybe_get_output
8381 // to retrieve the output
8382 }
8383 void set_output_raw_strided(
8384 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8385 TensorOptions options, DimnameList names
8386 ) override {
8387 const auto& out = outputs_[output_idx].get();
8388 resize_out(out, sizes, strides, options);
8389 if (!names.empty()) {
8390 namedinference::propagate_names(outputs_[output_idx], names);
8391 }
8392 // super must happen after, so that downstream can use maybe_get_output
8393 // to retrieve the output
8394 }
8395 const Tensor& maybe_get_output(int64_t output_idx) override {
8396 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
8397 }
8398 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
8399 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
8400};
8401at::Tensor & wrapper_CPU_amin_out_out(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
8402structured_amin_out_out op(out);
8403op.meta(self, dim, keepdim);
8404op.impl(self, dim, keepdim, op.maybe_get_output(0));
8405if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
8406return out;
8407}
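// NOTE: The wrappers placed in anonymous namespaces (median, nanmedian above,
// and the unstructured ops below) are plain forwards to hand-written kernels in
// at::native. The "// No device check" and "// DeviceGuard omitted" comments
// reflect the op's device_check / device_guard settings in
// native_functions.yaml rather than anything decided in this file.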
8408namespace {
8409::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> wrapper_CPU__mkldnn_rnn_layer(const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) {
8410 // No device check
8411 // DeviceGuard omitted
8412 return at::native::mkldnn_rnn_layer(input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train);
8413}
8414} // anonymous namespace
8415namespace {
8416::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> wrapper_CPU__mkldnn_rnn_layer_backward(const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace) {
8417 // No device check
8418 // DeviceGuard omitted
8419 return at::native::mkldnn_rnn_layer_backward(input, weight1, weight2, weight3, weight4, hx_, cx_tmp, output, hy_, cy_, grad_output, grad_hy, grad_cy, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace);
8420}
8421} // anonymous namespace
8422struct structured_mm_out_cpu_functional final : public at::native::structured_mm_out_cpu {
8423 void set_output_strided(
8424 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8425 TensorOptions options, DimnameList names
8426 ) override {
8427 outputs_[output_idx] = create_out(sizes, strides, options);
8428 if (!names.empty()) {
8429 namedinference::propagate_names(*outputs_[output_idx], names);
8430 }
8431 // super must happen after, so that downstream can use maybe_get_output
8432 // to retrieve the output
8433 }
8434 void set_output_raw_strided(
8435 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8436 TensorOptions options, DimnameList names
8437 ) override {
8438 outputs_[output_idx] = create_out(sizes, strides, options);
8439 if (!names.empty()) {
8440 namedinference::propagate_names(*outputs_[output_idx], names);
8441 }
8442 // super must happen after, so that downstream can use maybe_get_output
8443 // to retrieve the output
8444 }
8445 const Tensor& maybe_get_output(int64_t output_idx) override {
8446 return *outputs_[output_idx];
8447 }
8448 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
8449};
8450at::Tensor wrapper_CPU_mm(const at::Tensor & self, const at::Tensor & mat2) {
8451structured_mm_out_cpu_functional op;
8452op.meta(self, mat2);
8453op.impl(self, mat2, *op.outputs_[0]);
8454return std::move(op.outputs_[0]).take();
8455}
8456struct structured_mm_out_cpu_out final : public at::native::structured_mm_out_cpu {
8457 structured_mm_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
8458 void set_output_strided(
8459 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8460 TensorOptions options, DimnameList names
8461 ) override {
8462 const auto& out = outputs_[output_idx].get();
8463 resize_out(out, sizes, strides, options);
8464 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
8465 if (C10_UNLIKELY(maybe_proxy.has_value())) {
8466 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
8467 }
8468 if (!names.empty()) {
8469 namedinference::propagate_names(outputs_[output_idx], names);
8470 }
8471 // super must happen after, so that downstream can use maybe_get_output
8472 // to retrieve the output
8473 }
8474 void set_output_raw_strided(
8475 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8476 TensorOptions options, DimnameList names
8477 ) override {
8478 const auto& out = outputs_[output_idx].get();
8479 resize_out(out, sizes, strides, options);
8480 if (!names.empty()) {
8481 namedinference::propagate_names(outputs_[output_idx], names);
8482 }
8483 // super must happen after, so that downstream can use maybe_get_output
8484 // to retrieve the output
8485 }
8486 const Tensor& maybe_get_output(int64_t output_idx) override {
8487 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
8488 }
8489 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
8490 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
8491};
8492at::Tensor & wrapper_CPU_mm_out_out(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
8493structured_mm_out_cpu_out op(out);
8494op.meta(self, mat2);
8495op.impl(self, mat2, op.maybe_get_output(0));
8496if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
8497return out;
8498}
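// NOTE: resize_out() only reallocates the caller's tensor when its sizes do not
// already match. In set_output_strided(), maybe_create_proxy() then allocates a
// scratch tensor whenever the (possibly untouched) `out` cannot take the exact
// strides the kernel asked for, and the wrapper copy_()s the result back at the
// end; set_output_raw_strided() skips the proxy because the kernel has not
// pinned the strides. Illustrative out-variant call (hypothetical tensors):
//
//   at::Tensor a = at::rand({3, 4}), b = at::rand({4, 5});
//   at::Tensor out = at::empty({3, 5});
//   at::mm_out(out, a, b);   // handled by wrapper_CPU_mm_out_out on CPU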
8499namespace {
8500::std::tuple<at::Tensor,at::Tensor> wrapper_CPU__mode(const at::Tensor & self, int64_t dim, bool keepdim) {
8501 // No device check
8502 // DeviceGuard omitted
8503 return at::native::mode(self, dim, keepdim);
8504}
8505} // anonymous namespace
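// NOTE: mul and the other TensorIterator-backed ops below (reciprocal, neg,
// round, gelu, ...) differ from the reduction adapters above in one respect:
// after recording the output, every set_output_* override also calls the base
// class's set_output_raw_strided() so that the underlying TensorIterator learns
// which tensor it is expected to write into.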
8506struct structured_mul_out_functional final : public at::native::structured_mul_out {
8507 void set_output_strided(
8508 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8509 TensorOptions options, DimnameList names
8510 ) override {
8511 outputs_[output_idx] = create_out(sizes, strides, options);
8512 if (!names.empty()) {
8513 namedinference::propagate_names(*outputs_[output_idx], names);
8514 }
8515 // super must happen after, so that downstream can use maybe_get_output
8516 // to retrieve the output
8517 at::native::structured_mul_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
8518 }
8519 void set_output_raw_strided(
8520 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8521 TensorOptions options, DimnameList names
8522 ) override {
8523 outputs_[output_idx] = create_out(sizes, strides, options);
8524 if (!names.empty()) {
8525 namedinference::propagate_names(*outputs_[output_idx], names);
8526 }
8527 // super must happen after, so that downstream can use maybe_get_output
8528 // to retrieve the output
8529 at::native::structured_mul_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
8530 }
8531 const Tensor& maybe_get_output(int64_t output_idx) override {
8532 return *outputs_[output_idx];
8533 }
8534 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
8535};
8536at::Tensor wrapper_CPU_mul_Tensor(const at::Tensor & self, const at::Tensor & other) {
8537structured_mul_out_functional op;
8538op.meta(self, other);
8539op.impl(self, other, *op.outputs_[0]);
8540return std::move(op.outputs_[0]).take();
8541}
8542struct structured_mul_out_out final : public at::native::structured_mul_out {
8543 structured_mul_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
8544 void set_output_strided(
8545 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8546 TensorOptions options, DimnameList names
8547 ) override {
8548 const auto& out = outputs_[output_idx].get();
8549 resize_out(out, sizes, strides, options);
8550 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
8551 if (C10_UNLIKELY(maybe_proxy.has_value())) {
8552 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
8553 }
8554 if (!names.empty()) {
8555 namedinference::propagate_names(outputs_[output_idx], names);
8556 }
8557 // super must happen after, so that downstream can use maybe_get_output
8558 // to retrieve the output
8559 at::native::structured_mul_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
8560 }
8561 void set_output_raw_strided(
8562 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8563 TensorOptions options, DimnameList names
8564 ) override {
8565 const auto& out = outputs_[output_idx].get();
8566 resize_out(out, sizes, strides, options);
8567 if (!names.empty()) {
8568 namedinference::propagate_names(outputs_[output_idx], names);
8569 }
8570 // super must happen after, so that downstream can use maybe_get_output
8571 // to retrieve the output
8572 at::native::structured_mul_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
8573 }
8574 const Tensor& maybe_get_output(int64_t output_idx) override {
8575 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
8576 }
8577 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
8578 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
8579};
8580at::Tensor & wrapper_CPU_mul_out_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
8581structured_mul_out_out op(out);
8582op.meta(self, other);
8583op.impl(self, other, op.maybe_get_output(0));
8584if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
8585return out;
8586}
8587struct structured_mul_out_inplace final : public at::native::structured_mul_out {
8588 structured_mul_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
8589 void set_output_strided(
8590 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8591 TensorOptions options, DimnameList names
8592 ) override {
8593 const auto& out = outputs_[output_idx].get();
8594 check_inplace(out, sizes, options);
8595 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
8596 if (C10_UNLIKELY(maybe_proxy.has_value())) {
8597 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
8598 }
8599 if (!names.empty()) {
8600 namedinference::propagate_names(outputs_[output_idx], names);
8601 }
8602 // super must happen after, so that downstream can use maybe_get_output
8603 // to retrieve the output
8604 at::native::structured_mul_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
8605 }
8606 void set_output_raw_strided(
8607 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8608 TensorOptions options, DimnameList names
8609 ) override {
8610 const auto& out = outputs_[output_idx].get();
8611 check_inplace(out, sizes, options);
8612 if (!names.empty()) {
8613 namedinference::propagate_names(outputs_[output_idx], names);
8614 }
8615 // super must happen after, so that downstream can use maybe_get_output
8616 // to retrieve the output
8617 at::native::structured_mul_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
8618 }
8619 const Tensor& maybe_get_output(int64_t output_idx) override {
8620 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
8621 }
8622 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
8623 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
8624};
8625at::Tensor & wrapper_CPU_mul__Tensor(at::Tensor & self, const at::Tensor & other) {
8626structured_mul_out_inplace op(self);
8627op.meta(self, other);
8628op.impl(self, other, op.outputs_[0]);
8629if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
8630return self;
8631}
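// NOTE: In-place adapters bind `self` as output 0 and use check_inplace()
// instead of resize_out(): the tensor must already have the right sizes and
// options and is never reallocated. Illustrative (hypothetical tensors):
//
//   at::Tensor x = at::rand({2, 3}), y = at::rand({2, 3});
//   x.mul_(y);   // routed through wrapper_CPU_mul__Tensor on CPU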
8632namespace {
8633at::Tensor & wrapper_CPU_out_mvlgamma_out(const at::Tensor & self, int64_t p, at::Tensor & out) {
8634 // No device check
8635 // DeviceGuard omitted
8636 return at::native::mvlgamma_out(self, p, out);
8637}
8638} // anonymous namespace
8639namespace {
8640at::Tensor wrapper_CPU__narrow_copy(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
8641 // No device check
8642 // DeviceGuard omitted
8643 return at::native::narrow_copy_dense_cpu(self, dim, start.expect_int(), length.expect_int());
8644}
8645} // anonymous namespace
8646namespace {
8647at::Tensor & wrapper_CPU_out_narrow_copy_out(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length, at::Tensor & out) {
8648 // No device check
8649 // DeviceGuard omitted
8650 return at::native::narrow_copy_dense_cpu_out(self, dim, start.expect_int(), length.expect_int(), out);
8651}
8652} // anonymous namespace
8653namespace {
8654::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU__native_batch_norm(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps) {
8655 // No device check
8656 // DeviceGuard omitted
8657 return at::native::batch_norm_cpu(input, weight, bias, running_mean, running_var, training, momentum, eps);
8658}
8659} // anonymous namespace
8660namespace {
8661::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CPU_out_native_batch_norm_out(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
8662 // No device check
8663 // DeviceGuard omitted
8664 return at::native::batch_norm_cpu_out(input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd);
8665}
8666} // anonymous namespace
8667namespace {
8668::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CPU_out__native_batch_norm_legit_out(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
8669 // No device check
8670 // DeviceGuard omitted
8671 return at::native::_batch_norm_legit_cpu_out(input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd);
8672}
8673} // anonymous namespace
8674namespace {
8675::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU___native_batch_norm_legit(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps) {
8676 // No device check
8677 // DeviceGuard omitted
8678 return at::native::_batch_norm_legit_cpu(input, weight, bias, running_mean, running_var, training, momentum, eps);
8679}
8680} // anonymous namespace
8681namespace {
8682::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU_no_stats__native_batch_norm_legit(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, bool training, double momentum, double eps) {
8683 // No device check
8684 // DeviceGuard omitted
8685 return at::native::_batch_norm_legit_no_stats_cpu(input, weight, bias, training, momentum, eps);
8686}
8687} // anonymous namespace
8688namespace {
8689::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CPU_no_stats_out__native_batch_norm_legit_out(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
8690 // No device check
8691 // DeviceGuard omitted
8692 return at::native::_batch_norm_legit_no_stats_cpu_out(input, weight, bias, training, momentum, eps, out, save_mean, save_invstd);
8693}
8694} // anonymous namespace
8695namespace {
8696::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU__native_batch_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask) {
8697 // No device check
8698 // DeviceGuard omitted
8699 return at::native::batch_norm_backward_cpu(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask);
8700}
8701} // anonymous namespace
8702namespace {
8703::std::tuple<at::Tensor,at::Tensor> wrapper_CPU__batch_norm_update_stats(const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum) {
8704 // No device check
8705 // DeviceGuard omitted
8706 return at::native::batch_norm_update_stats_cpu(input, running_mean, running_var, momentum);
8707}
8708} // anonymous namespace
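// NOTE: The batch-norm family above and the distance/shuffle ops below are all
// unstructured: each wrapper simply forwards to its dedicated CPU kernel
// (batch_norm_cpu, _cdist_forward, pixel_shuffle_cpu, ...) with no separate
// meta step.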
8709namespace {
8710at::Tensor wrapper_CPU___cdist_forward(const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode) {
8711 // No device check
8712 // DeviceGuard omitted
8713 return at::native::_cdist_forward(x1, x2, p, compute_mode);
8714}
8715} // anonymous namespace
8716namespace {
8717at::Tensor wrapper_CPU___cdist_backward(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist) {
8718 // No device check
8719 // DeviceGuard omitted
8720 return at::native::_cdist_backward(grad, x1, x2, p, cdist);
8721}
8722} // anonymous namespace
8723namespace {
8724at::Tensor wrapper_CPU___pdist_forward(const at::Tensor & self, double p) {
8725 // No device check
8726 // DeviceGuard omitted
8727 return at::native::_pdist_forward(self, p);
8728}
8729} // anonymous namespace
8730namespace {
8731at::Tensor wrapper_CPU___pdist_backward(const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist) {
8732 // No device check
8733 // DeviceGuard omitted
8734 return at::native::_pdist_backward(grad, self, p, pdist);
8735}
8736} // anonymous namespace
8737namespace {
8738at::Tensor wrapper_CPU__pixel_shuffle(const at::Tensor & self, int64_t upscale_factor) {
8739 // No device check
8740 // DeviceGuard omitted
8741 return at::native::pixel_shuffle_cpu(self, upscale_factor);
8742}
8743} // anonymous namespace
8744namespace {
8745at::Tensor wrapper_CPU__pixel_unshuffle(const at::Tensor & self, int64_t downscale_factor) {
8746 // No device check
8747 // DeviceGuard omitted
8748 return at::native::pixel_unshuffle_cpu(self, downscale_factor);
8749}
8750} // anonymous namespace
8751namespace {
8752at::Tensor wrapper_CPU__channel_shuffle(const at::Tensor & self, int64_t groups) {
8753 // No device check
8754 // DeviceGuard omitted
8755 return at::native::channel_shuffle(self, groups);
8756}
8757} // anonymous namespace
8758namespace {
8759at::Tensor wrapper_CPU__native_channel_shuffle(const at::Tensor & self, int64_t groups) {
8760 // No device check
8761 // DeviceGuard omitted
8762 return at::native::channel_shuffle_cpu(self, groups);
8763}
8764} // anonymous namespace
8765namespace {
8766at::Tensor & wrapper_CPU_generator_out_randperm_out(int64_t n, c10::optional<at::Generator> generator, at::Tensor & out) {
8767 // No device check
8768 // DeviceGuard omitted
8769 return at::native::randperm_out_cpu(n, generator, out);
8770}
8771} // anonymous namespace
8772namespace {
8773at::Tensor & wrapper_CPU_out_range_out(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out) {
8774 // No device check
8775 // DeviceGuard omitted
8776 return at::native::range_out(start, end, step, out);
8777}
8778} // anonymous namespace
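// NOTE: randperm.generator_out and range.out above are the last unstructured
// entries in this stretch. The structured element-wise ops that follow
// (reciprocal, neg, round, round.decimals, gelu) each repeat the functional /
// out / in-place adapter trio, interleaved with a few more unstructured
// forwards (repeat_interleave, _reshape_alias, relu, _prelu_kernel). A quick
// illustration of the three entry points for reciprocal (hypothetical tensors):
//
//   at::Tensor x = at::rand({4});
//   at::Tensor y = at::reciprocal(x);        // functional
//   at::Tensor out = at::empty({4});
//   at::reciprocal_out(out, x);              // out=
//   x.reciprocal_();                         // in-place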
8779struct structured_reciprocal_out_functional final : public at::native::structured_reciprocal_out {
8780 void set_output_strided(
8781 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8782 TensorOptions options, DimnameList names
8783 ) override {
8784 outputs_[output_idx] = create_out(sizes, strides, options);
8785 if (!names.empty()) {
8786 namedinference::propagate_names(*outputs_[output_idx], names);
8787 }
8788 // super must happen after, so that downstream can use maybe_get_output
8789 // to retrieve the output
8790 at::native::structured_reciprocal_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
8791 }
8792 void set_output_raw_strided(
8793 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8794 TensorOptions options, DimnameList names
8795 ) override {
8796 outputs_[output_idx] = create_out(sizes, strides, options);
8797 if (!names.empty()) {
8798 namedinference::propagate_names(*outputs_[output_idx], names);
8799 }
8800 // super must happen after, so that downstream can use maybe_get_output
8801 // to retrieve the output
8802 at::native::structured_reciprocal_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
8803 }
8804 const Tensor& maybe_get_output(int64_t output_idx) override {
8805 return *outputs_[output_idx];
8806 }
8807 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
8808};
8809at::Tensor wrapper_CPU_reciprocal(const at::Tensor & self) {
8810structured_reciprocal_out_functional op;
8811op.meta(self);
8812op.impl(self, *op.outputs_[0]);
8813return std::move(op.outputs_[0]).take();
8814}
8815struct structured_reciprocal_out_out final : public at::native::structured_reciprocal_out {
8816 structured_reciprocal_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
8817 void set_output_strided(
8818 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8819 TensorOptions options, DimnameList names
8820 ) override {
8821 const auto& out = outputs_[output_idx].get();
8822 resize_out(out, sizes, strides, options);
8823 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
8824 if (C10_UNLIKELY(maybe_proxy.has_value())) {
8825 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
8826 }
8827 if (!names.empty()) {
8828 namedinference::propagate_names(outputs_[output_idx], names);
8829 }
8830 // super must happen after, so that downstream can use maybe_get_output
8831 // to retrieve the output
8832 at::native::structured_reciprocal_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
8833 }
8834 void set_output_raw_strided(
8835 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8836 TensorOptions options, DimnameList names
8837 ) override {
8838 const auto& out = outputs_[output_idx].get();
8839 resize_out(out, sizes, strides, options);
8840 if (!names.empty()) {
8841 namedinference::propagate_names(outputs_[output_idx], names);
8842 }
8843 // super must happen after, so that downstream can use maybe_get_output
8844 // to retrieve the output
8845 at::native::structured_reciprocal_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
8846 }
8847 const Tensor& maybe_get_output(int64_t output_idx) override {
8848 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
8849 }
8850 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
8851 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
8852};
8853at::Tensor & wrapper_CPU_reciprocal_out_out(const at::Tensor & self, at::Tensor & out) {
8854structured_reciprocal_out_out op(out);
8855op.meta(self);
8856op.impl(self, op.maybe_get_output(0));
8857if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
8858return out;
8859}
8860struct structured_reciprocal_out_inplace final : public at::native::structured_reciprocal_out {
8861 structured_reciprocal_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
8862 void set_output_strided(
8863 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8864 TensorOptions options, DimnameList names
8865 ) override {
8866 const auto& out = outputs_[output_idx].get();
8867 check_inplace(out, sizes, options);
8868 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
8869 if (C10_UNLIKELY(maybe_proxy.has_value())) {
8870 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
8871 }
8872 if (!names.empty()) {
8873 namedinference::propagate_names(outputs_[output_idx], names);
8874 }
8875 // super must happen after, so that downstream can use maybe_get_output
8876 // to retrieve the output
8877 at::native::structured_reciprocal_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
8878 }
8879 void set_output_raw_strided(
8880 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8881 TensorOptions options, DimnameList names
8882 ) override {
8883 const auto& out = outputs_[output_idx].get();
8884 check_inplace(out, sizes, options);
8885 if (!names.empty()) {
8886 namedinference::propagate_names(outputs_[output_idx], names);
8887 }
8888 // super must happen after, so that downstream can use maybe_get_output
8889 // to retrieve the output
8890 at::native::structured_reciprocal_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
8891 }
8892 const Tensor& maybe_get_output(int64_t output_idx) override {
8893 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
8894 }
8895 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
8896 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
8897};
8898at::Tensor & wrapper_CPU_reciprocal_(at::Tensor & self) {
8899structured_reciprocal_out_inplace op(self);
8900op.meta(self);
8901op.impl(self, op.outputs_[0]);
8902if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
8903return self;
8904}
8905struct structured_neg_out_functional final : public at::native::structured_neg_out {
8906 void set_output_strided(
8907 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8908 TensorOptions options, DimnameList names
8909 ) override {
8910 outputs_[output_idx] = create_out(sizes, strides, options);
8911 if (!names.empty()) {
8912 namedinference::propagate_names(*outputs_[output_idx], names);
8913 }
8914 // super must happen after, so that downstream can use maybe_get_output
8915 // to retrieve the output
8916 at::native::structured_neg_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
8917 }
8918 void set_output_raw_strided(
8919 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8920 TensorOptions options, DimnameList names
8921 ) override {
8922 outputs_[output_idx] = create_out(sizes, strides, options);
8923 if (!names.empty()) {
8924 namedinference::propagate_names(*outputs_[output_idx], names);
8925 }
8926 // super must happen after, so that downstream can use maybe_get_output
8927 // to retrieve the output
8928 at::native::structured_neg_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
8929 }
8930 const Tensor& maybe_get_output(int64_t output_idx) override {
8931 return *outputs_[output_idx];
8932 }
8933 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
8934};
8935at::Tensor wrapper_CPU_neg(const at::Tensor & self) {
8936structured_neg_out_functional op;
8937op.meta(self);
8938op.impl(self, *op.outputs_[0]);
8939return std::move(op.outputs_[0]).take();
8940}
8941struct structured_neg_out_out final : public at::native::structured_neg_out {
8942 structured_neg_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
8943 void set_output_strided(
8944 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8945 TensorOptions options, DimnameList names
8946 ) override {
8947 const auto& out = outputs_[output_idx].get();
8948 resize_out(out, sizes, strides, options);
8949 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
8950 if (C10_UNLIKELY(maybe_proxy.has_value())) {
8951 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
8952 }
8953 if (!names.empty()) {
8954 namedinference::propagate_names(outputs_[output_idx], names);
8955 }
8956 // super must happen after, so that downstream can use maybe_get_output
8957 // to retrieve the output
8958 at::native::structured_neg_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
8959 }
8960 void set_output_raw_strided(
8961 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8962 TensorOptions options, DimnameList names
8963 ) override {
8964 const auto& out = outputs_[output_idx].get();
8965 resize_out(out, sizes, strides, options);
8966 if (!names.empty()) {
8967 namedinference::propagate_names(outputs_[output_idx], names);
8968 }
8969 // super must happen after, so that downstream can use maybe_get_output
8970 // to retrieve the output
8971 at::native::structured_neg_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
8972 }
8973 const Tensor& maybe_get_output(int64_t output_idx) override {
8974 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
8975 }
8976 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
8977 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
8978};
8979at::Tensor & wrapper_CPU_neg_out_out(const at::Tensor & self, at::Tensor & out) {
8980structured_neg_out_out op(out);
8981op.meta(self);
8982op.impl(self, op.maybe_get_output(0));
8983if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
8984return out;
8985}
8986struct structured_neg_out_inplace final : public at::native::structured_neg_out {
8987 structured_neg_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
8988 void set_output_strided(
8989 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
8990 TensorOptions options, DimnameList names
8991 ) override {
8992 const auto& out = outputs_[output_idx].get();
8993 check_inplace(out, sizes, options);
8994 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
8995 if (C10_UNLIKELY(maybe_proxy.has_value())) {
8996 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
8997 }
8998 if (!names.empty()) {
8999 namedinference::propagate_names(outputs_[output_idx], names);
9000 }
9001 // super must happen after, so that downstream can use maybe_get_output
9002 // to retrieve the output
9003 at::native::structured_neg_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9004 }
9005 void set_output_raw_strided(
9006 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9007 TensorOptions options, DimnameList names
9008 ) override {
9009 const auto& out = outputs_[output_idx].get();
9010 check_inplace(out, sizes, options);
9011 if (!names.empty()) {
9012 namedinference::propagate_names(outputs_[output_idx], names);
9013 }
9014 // super must happen after, so that downstream can use maybe_get_output
9015 // to retrieve the output
9016 at::native::structured_neg_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9017 }
9018 const Tensor& maybe_get_output(int64_t output_idx) override {
9019 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
9020 }
9021 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
9022 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
9023};
9024at::Tensor & wrapper_CPU_neg_(at::Tensor & self) {
9025structured_neg_out_inplace op(self);
9026op.meta(self);
9027op.impl(self, op.outputs_[0]);
9028if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
9029return self;
9030}
9031namespace {
9032at::Tensor wrapper_CPU_Tensor_repeat_interleave(const at::Tensor & repeats, c10::optional<int64_t> output_size) {
9033 // No device check
9034 // DeviceGuard omitted
9035 return at::native::repeat_interleave_cpu(repeats, output_size);
9036}
9037} // anonymous namespace
9038namespace {
9039at::Tensor wrapper_CPU___reshape_alias(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
9040 // No device check
9041 // DeviceGuard omitted
9042 return at::native::_reshape_alias(self, C10_AS_INTARRAYREF_SLOW(size), C10_AS_INTARRAYREF_SLOW(stride));
9043}
9044} // anonymous namespace
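// NOTE: Wrappers taking c10::SymInt / c10::SymIntArrayRef arguments (such as
// narrow_copy above and _reshape_alias here) lower them with expect_int() /
// C10_AS_INTARRAYREF_SLOW before calling the non-symbolic at::native kernels;
// on this eager CPU dispatch path the sizes are expected to be concrete
// integers.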
9045struct structured_round_out_functional final : public at::native::structured_round_out {
9046 void set_output_strided(
9047 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9048 TensorOptions options, DimnameList names
9049 ) override {
9050 outputs_[output_idx] = create_out(sizes, strides, options);
9051 if (!names.empty()) {
9052 namedinference::propagate_names(*outputs_[output_idx], names);
9053 }
9054 // super must happen after, so that downstream can use maybe_get_output
9055 // to retrieve the output
9056 at::native::structured_round_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9057 }
9058 void set_output_raw_strided(
9059 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9060 TensorOptions options, DimnameList names
9061 ) override {
9062 outputs_[output_idx] = create_out(sizes, strides, options);
9063 if (!names.empty()) {
9064 namedinference::propagate_names(*outputs_[output_idx], names);
9065 }
9066 // super must happen after, so that downstream can use maybe_get_output
9067 // to retrieve the output
9068 at::native::structured_round_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9069 }
9070 const Tensor& maybe_get_output(int64_t output_idx) override {
9071 return *outputs_[output_idx];
9072 }
9073 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
9074};
9075at::Tensor wrapper_CPU_round(const at::Tensor & self) {
9076structured_round_out_functional op;
9077op.meta(self);
9078op.impl(self, *op.outputs_[0]);
9079return std::move(op.outputs_[0]).take();
9080}
9081struct structured_round_out_out final : public at::native::structured_round_out {
9082 structured_round_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
9083 void set_output_strided(
9084 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9085 TensorOptions options, DimnameList names
9086 ) override {
9087 const auto& out = outputs_[output_idx].get();
9088 resize_out(out, sizes, strides, options);
9089 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
9090 if (C10_UNLIKELY(maybe_proxy.has_value())) {
9091 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
9092 }
9093 if (!names.empty()) {
9094 namedinference::propagate_names(outputs_[output_idx], names);
9095 }
9096 // super must happen after, so that downstream can use maybe_get_output
9097 // to retrieve the output
9098 at::native::structured_round_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9099 }
9100 void set_output_raw_strided(
9101 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9102 TensorOptions options, DimnameList names
9103 ) override {
9104 const auto& out = outputs_[output_idx].get();
9105 resize_out(out, sizes, strides, options);
9106 if (!names.empty()) {
9107 namedinference::propagate_names(outputs_[output_idx], names);
9108 }
9109 // super must happen after, so that downstream can use maybe_get_output
9110 // to retrieve the output
9111 at::native::structured_round_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9112 }
9113 const Tensor& maybe_get_output(int64_t output_idx) override {
9114 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
9115 }
9116 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
9117 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
9118};
9119at::Tensor & wrapper_CPU_round_out_out(const at::Tensor & self, at::Tensor & out) {
9120structured_round_out_out op(out);
9121op.meta(self);
9122op.impl(self, op.maybe_get_output(0));
9123if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
9124return out;
9125}
9126struct structured_round_out_inplace final : public at::native::structured_round_out {
9127 structured_round_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
9128 void set_output_strided(
9129 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9130 TensorOptions options, DimnameList names
9131 ) override {
9132 const auto& out = outputs_[output_idx].get();
9133 check_inplace(out, sizes, options);
9134 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
9135 if (C10_UNLIKELY(maybe_proxy.has_value())) {
9136 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
9137 }
9138 if (!names.empty()) {
9139 namedinference::propagate_names(outputs_[output_idx], names);
9140 }
9141 // super must happen after, so that downstream can use maybe_get_output
9142 // to retrieve the output
9143 at::native::structured_round_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9144 }
9145 void set_output_raw_strided(
9146 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9147 TensorOptions options, DimnameList names
9148 ) override {
9149 const auto& out = outputs_[output_idx].get();
9150 check_inplace(out, sizes, options);
9151 if (!names.empty()) {
9152 namedinference::propagate_names(outputs_[output_idx], names);
9153 }
9154 // super must happen after, so that downstream can use maybe_get_output
9155 // to retrieve the output
9156 at::native::structured_round_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9157 }
9158 const Tensor& maybe_get_output(int64_t output_idx) override {
9159 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
9160 }
9161 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
9162 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
9163};
9164at::Tensor & wrapper_CPU_round_(at::Tensor & self) {
9165structured_round_out_inplace op(self);
9166op.meta(self);
9167op.impl(self, op.outputs_[0]);
9168if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
9169return self;
9170}
9171struct structured_round_decimals_out_functional final : public at::native::structured_round_decimals_out {
9172 void set_output_strided(
9173 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9174 TensorOptions options, DimnameList names
9175 ) override {
9176 outputs_[output_idx] = create_out(sizes, strides, options);
9177 if (!names.empty()) {
9178 namedinference::propagate_names(*outputs_[output_idx], names);
9179 }
9180 // super must happen after, so that downstream can use maybe_get_output
9181 // to retrieve the output
9182 at::native::structured_round_decimals_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9183 }
9184 void set_output_raw_strided(
9185 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9186 TensorOptions options, DimnameList names
9187 ) override {
9188 outputs_[output_idx] = create_out(sizes, strides, options);
9189 if (!names.empty()) {
9190 namedinference::propagate_names(*outputs_[output_idx], names);
9191 }
9192 // super must happen after, so that downstream can use maybe_get_output
9193 // to retrieve the output
9194 at::native::structured_round_decimals_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9195 }
9196 const Tensor& maybe_get_output(int64_t output_idx) override {
9197 return *outputs_[output_idx];
9198 }
9199 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
9200};
9201at::Tensor wrapper_CPU_round_decimals(const at::Tensor & self, int64_t decimals) {
9202structured_round_decimals_out_functional op;
9203op.meta(self, decimals);
9204op.impl(self, decimals, *op.outputs_[0]);
9205return std::move(op.outputs_[0]).take();
9206}
9207struct structured_round_decimals_out_out final : public at::native::structured_round_decimals_out {
9208 structured_round_decimals_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
9209 void set_output_strided(
9210 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9211 TensorOptions options, DimnameList names
9212 ) override {
9213 const auto& out = outputs_[output_idx].get();
9214 resize_out(out, sizes, strides, options);
9215 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
9216 if (C10_UNLIKELY(maybe_proxy.has_value())) {
9217 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
9218 }
9219 if (!names.empty()) {
9220 namedinference::propagate_names(outputs_[output_idx], names);
9221 }
9222 // super must happen after, so that downstream can use maybe_get_output
9223 // to retrieve the output
9224 at::native::structured_round_decimals_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9225 }
9226 void set_output_raw_strided(
9227 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9228 TensorOptions options, DimnameList names
9229 ) override {
9230 const auto& out = outputs_[output_idx].get();
9231 resize_out(out, sizes, strides, options);
9232 if (!names.empty()) {
9233 namedinference::propagate_names(outputs_[output_idx], names);
9234 }
9235 // super must happen after, so that downstream can use maybe_get_output
9236 // to retrieve the output
9237 at::native::structured_round_decimals_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9238 }
9239 const Tensor& maybe_get_output(int64_t output_idx) override {
9240 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
9241 }
9242 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
9243 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
9244};
9245at::Tensor & wrapper_CPU_round_out_decimals_out(const at::Tensor & self, int64_t decimals, at::Tensor & out) {
9246structured_round_decimals_out_out op(out);
9247op.meta(self, decimals);
9248op.impl(self, decimals, op.maybe_get_output(0));
9249if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
9250return out;
9251}
9252struct structured_round_decimals_out_inplace final : public at::native::structured_round_decimals_out {
9253 structured_round_decimals_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
9254 void set_output_strided(
9255 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9256 TensorOptions options, DimnameList names
9257 ) override {
9258 const auto& out = outputs_[output_idx].get();
9259 check_inplace(out, sizes, options);
9260 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
9261 if (C10_UNLIKELY(maybe_proxy.has_value())) {
9262 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
9263 }
9264 if (!names.empty()) {
9265 namedinference::propagate_names(outputs_[output_idx], names);
9266 }
9267 // super must happen after, so that downstream can use maybe_get_output
9268 // to retrieve the output
9269 at::native::structured_round_decimals_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9270 }
9271 void set_output_raw_strided(
9272 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9273 TensorOptions options, DimnameList names
9274 ) override {
9275 const auto& out = outputs_[output_idx].get();
9276 check_inplace(out, sizes, options);
9277 if (!names.empty()) {
9278 namedinference::propagate_names(outputs_[output_idx], names);
9279 }
9280 // super must happen after, so that downstream can use maybe_get_output
9281 // to retrieve the output
9282 at::native::structured_round_decimals_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9283 }
9284 const Tensor& maybe_get_output(int64_t output_idx) override {
9285 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
9286 }
9287 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
9288 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
9289};
9290at::Tensor & wrapper_CPU_round__decimals(at::Tensor & self, int64_t decimals) {
9291structured_round_decimals_out_inplace op(self);
9292op.meta(self, decimals);
9293op.impl(self, decimals, op.outputs_[0]);
9294if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
9295return self;
9296}
9297namespace {
9298at::Tensor wrapper_CPU__relu(const at::Tensor & self) {
9299 // No device check
9300 // DeviceGuard omitted
9301 return at::native::relu(self);
9302}
9303} // anonymous namespace
9304namespace {
9305at::Tensor & wrapper_CPU__relu_(at::Tensor & self) {
9306 // No device check
9307 // DeviceGuard omitted
9308 return at::native::relu_(self);
9309}
9310} // anonymous namespace
9311namespace {
9312at::Tensor wrapper_CPU___prelu_kernel(const at::Tensor & self, const at::Tensor & weight) {
9313 // No device check
9314 // DeviceGuard omitted
9315 return at::native::_prelu_kernel(self, weight);
9316}
9317} // anonymous namespace
9318namespace {
9319::std::tuple<at::Tensor,at::Tensor> wrapper_CPU___prelu_kernel_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight) {
9320 // No device check
9321 // DeviceGuard omitted
9322 return at::native::_prelu_kernel_backward(grad_output, self, weight);
9323}
9324} // anonymous namespace
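// NOTE: gelu is structured on CPU and threads its c10::string_view
// `approximate` argument ("none" or "tanh") through both meta() and impl().
// Illustrative (hypothetical tensor `x`):
//
//   at::Tensor y = at::gelu(x, "tanh");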
9325struct structured_gelu_out_cpu_functional final : public at::native::structured_gelu_out_cpu {
9326 void set_output_strided(
9327 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9328 TensorOptions options, DimnameList names
9329 ) override {
9330 outputs_[output_idx] = create_out(sizes, strides, options);
9331 if (!names.empty()) {
9332 namedinference::propagate_names(*outputs_[output_idx], names);
9333 }
9334 // super must happen after, so that downstream can use maybe_get_output
9335 // to retrieve the output
9336 at::native::structured_gelu_out_cpu::set_output_raw_strided(output_idx, sizes, strides, options, names);
9337 }
9338 void set_output_raw_strided(
9339 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9340 TensorOptions options, DimnameList names
9341 ) override {
9342 outputs_[output_idx] = create_out(sizes, strides, options);
9343 if (!names.empty()) {
9344 namedinference::propagate_names(*outputs_[output_idx], names);
9345 }
9346 // super must happen after, so that downstream can use maybe_get_output
9347 // to retrieve the output
9348 at::native::structured_gelu_out_cpu::set_output_raw_strided(output_idx, sizes, strides, options, names);
9349 }
9350 const Tensor& maybe_get_output(int64_t output_idx) override {
9351 return *outputs_[output_idx];
9352 }
  std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
};
at::Tensor wrapper_CPU_gelu(const at::Tensor & self, c10::string_view approximate) {
structured_gelu_out_cpu_functional op;
op.meta(self, approximate);
op.impl(self, approximate, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_gelu_out_cpu_out final : public at::native::structured_gelu_out_cpu {
  structured_gelu_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    const auto& out = outputs_[output_idx].get();
    resize_out(out, sizes, strides, options);
    auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
    if (C10_UNLIKELY(maybe_proxy.has_value())) {
      proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
    }
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::native::structured_gelu_out_cpu::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    const auto& out = outputs_[output_idx].get();
    resize_out(out, sizes, strides, options);
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::native::structured_gelu_out_cpu::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
  }
  std::array<std::reference_wrapper<Tensor>, 1> outputs_;
  std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_gelu_out_out(const at::Tensor & self, c10::string_view approximate, at::Tensor & out) {
structured_gelu_out_cpu_out op(out);
op.meta(self, approximate);
op.impl(self, approximate, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return out;
}
struct structured_gelu_out_cpu_inplace final : public at::native::structured_gelu_out_cpu {
  structured_gelu_out_cpu_inplace(Tensor& self) : outputs_{std::ref(self)} {}
  void set_output_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
    if (C10_UNLIKELY(maybe_proxy.has_value())) {
      proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
    }
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::native::structured_gelu_out_cpu::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  void set_output_raw_strided(
      int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
      TensorOptions options, DimnameList names
  ) override {
    const auto& out = outputs_[output_idx].get();
    check_inplace(out, sizes, options);
    if (!names.empty()) {
      namedinference::propagate_names(outputs_[output_idx], names);
    }
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
    at::native::structured_gelu_out_cpu::set_output_raw_strided(output_idx, sizes, strides, options, names);
  }
  const Tensor& maybe_get_output(int64_t output_idx) override {
    return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
  }
  std::array<std::reference_wrapper<Tensor>, 1> outputs_;
  std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_gelu_(at::Tensor & self, c10::string_view approximate) {
structured_gelu_out_cpu_inplace op(self);
op.meta(self, approximate);
op.impl(self, approximate, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
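// Illustrative only (this assumes the usual TORCH_LIBRARY_IMPL(aten, CPU, ...)
// registration emitted further down in this generated file, which is not shown
// in this excerpt): once these wrappers are registered under the CPU dispatch
// key, CPU tensors reach them through the ordinary ATen entry points, e.g.
//
//   at::Tensor x = at::randn({4});
//   at::Tensor y = at::gelu(x);         // -> wrapper_CPU_gelu
//   at::Tensor out = at::empty_like(x);
//   at::gelu_outf(x, "none", out);      // -> wrapper_CPU_gelu_out_out
//   at::gelu_(x);                       // -> wrapper_CPU_gelu_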
9451struct structured_gelu_backward_out_cpu_functional final : public at::native::structured_gelu_backward_out_cpu {
9452 void set_output_strided(
9453 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9454 TensorOptions options, DimnameList names
9455 ) override {
9456 outputs_[output_idx] = create_out(sizes, strides, options);
9457 if (!names.empty()) {
9458 namedinference::propagate_names(*outputs_[output_idx], names);
9459 }
9460 // super must happen after, so that downstream can use maybe_get_output
9461 // to retrieve the output
9462 at::native::structured_gelu_backward_out_cpu::set_output_raw_strided(output_idx, sizes, strides, options, names);
9463 }
9464 void set_output_raw_strided(
9465 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9466 TensorOptions options, DimnameList names
9467 ) override {
9468 outputs_[output_idx] = create_out(sizes, strides, options);
9469 if (!names.empty()) {
9470 namedinference::propagate_names(*outputs_[output_idx], names);
9471 }
9472 // super must happen after, so that downstream can use maybe_get_output
9473 // to retrieve the output
9474 at::native::structured_gelu_backward_out_cpu::set_output_raw_strided(output_idx, sizes, strides, options, names);
9475 }
9476 const Tensor& maybe_get_output(int64_t output_idx) override {
9477 return *outputs_[output_idx];
9478 }
9479 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
9480};
9481at::Tensor wrapper_CPU_gelu_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate) {
9482structured_gelu_backward_out_cpu_functional op;
9483op.meta(grad_output, self, approximate);
9484op.impl(grad_output, self, approximate, *op.outputs_[0]);
9485return std::move(op.outputs_[0]).take();
9486}
9487struct structured_gelu_backward_out_cpu_out final : public at::native::structured_gelu_backward_out_cpu {
9488 structured_gelu_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
9489 void set_output_strided(
9490 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9491 TensorOptions options, DimnameList names
9492 ) override {
9493 const auto& out = outputs_[output_idx].get();
9494 resize_out(out, sizes, strides, options);
9495 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
9496 if (C10_UNLIKELY(maybe_proxy.has_value())) {
9497 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
9498 }
9499 if (!names.empty()) {
9500 namedinference::propagate_names(outputs_[output_idx], names);
9501 }
9502 // super must happen after, so that downstream can use maybe_get_output
9503 // to retrieve the output
9504 at::native::structured_gelu_backward_out_cpu::set_output_raw_strided(output_idx, sizes, strides, options, names);
9505 }
9506 void set_output_raw_strided(
9507 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9508 TensorOptions options, DimnameList names
9509 ) override {
9510 const auto& out = outputs_[output_idx].get();
9511 resize_out(out, sizes, strides, options);
9512 if (!names.empty()) {
9513 namedinference::propagate_names(outputs_[output_idx], names);
9514 }
9515 // super must happen after, so that downstream can use maybe_get_output
9516 // to retrieve the output
9517 at::native::structured_gelu_backward_out_cpu::set_output_raw_strided(output_idx, sizes, strides, options, names);
9518 }
9519 const Tensor& maybe_get_output(int64_t output_idx) override {
9520 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
9521 }
9522 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
9523 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
9524};
9525at::Tensor & wrapper_CPU_gelu_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input) {
9526structured_gelu_backward_out_cpu_out op(grad_input);
9527op.meta(grad_output, self, approximate);
9528op.impl(grad_output, self, approximate, op.maybe_get_output(0));
9529if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
9530return grad_input;
9531}
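// Backward kernels such as gelu_backward get the same treatment; their
// explicit-output overload is named after the `grad_input` out argument
// (gelu_backward.grad_input), hence wrapper_CPU_gelu_backward_out_grad_input
// above, and no in-place variant is generated for them.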
9532struct structured_hardshrink_out_functional final : public at::native::structured_hardshrink_out {
9533 void set_output_strided(
9534 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9535 TensorOptions options, DimnameList names
9536 ) override {
9537 outputs_[output_idx] = create_out(sizes, strides, options);
9538 if (!names.empty()) {
9539 namedinference::propagate_names(*outputs_[output_idx], names);
9540 }
9541 // super must happen after, so that downstream can use maybe_get_output
9542 // to retrieve the output
9543 at::native::structured_hardshrink_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9544 }
9545 void set_output_raw_strided(
9546 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9547 TensorOptions options, DimnameList names
9548 ) override {
9549 outputs_[output_idx] = create_out(sizes, strides, options);
9550 if (!names.empty()) {
9551 namedinference::propagate_names(*outputs_[output_idx], names);
9552 }
9553 // super must happen after, so that downstream can use maybe_get_output
9554 // to retrieve the output
9555 at::native::structured_hardshrink_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9556 }
9557 const Tensor& maybe_get_output(int64_t output_idx) override {
9558 return *outputs_[output_idx];
9559 }
9560 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
9561};
9562at::Tensor wrapper_CPU_hardshrink(const at::Tensor & self, const at::Scalar & lambd) {
9563structured_hardshrink_out_functional op;
9564op.meta(self, lambd);
9565op.impl(self, lambd, *op.outputs_[0]);
9566return std::move(op.outputs_[0]).take();
9567}
9568struct structured_hardshrink_out_out final : public at::native::structured_hardshrink_out {
9569 structured_hardshrink_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
9570 void set_output_strided(
9571 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9572 TensorOptions options, DimnameList names
9573 ) override {
9574 const auto& out = outputs_[output_idx].get();
9575 resize_out(out, sizes, strides, options);
9576 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
9577 if (C10_UNLIKELY(maybe_proxy.has_value())) {
9578 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
9579 }
9580 if (!names.empty()) {
9581 namedinference::propagate_names(outputs_[output_idx], names);
9582 }
9583 // super must happen after, so that downstream can use maybe_get_output
9584 // to retrieve the output
9585 at::native::structured_hardshrink_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9586 }
9587 void set_output_raw_strided(
9588 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9589 TensorOptions options, DimnameList names
9590 ) override {
9591 const auto& out = outputs_[output_idx].get();
9592 resize_out(out, sizes, strides, options);
9593 if (!names.empty()) {
9594 namedinference::propagate_names(outputs_[output_idx], names);
9595 }
9596 // super must happen after, so that downstream can use maybe_get_output
9597 // to retrieve the output
9598 at::native::structured_hardshrink_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9599 }
9600 const Tensor& maybe_get_output(int64_t output_idx) override {
9601 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
9602 }
9603 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
9604 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
9605};
9606at::Tensor & wrapper_CPU_hardshrink_out_out(const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out) {
9607structured_hardshrink_out_out op(out);
9608op.meta(self, lambd);
9609op.impl(self, lambd, op.maybe_get_output(0));
9610if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
9611return out;
9612}
9613struct structured_hardshrink_backward_out_functional final : public at::native::structured_hardshrink_backward_out {
9614 void set_output_strided(
9615 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9616 TensorOptions options, DimnameList names
9617 ) override {
9618 outputs_[output_idx] = create_out(sizes, strides, options);
9619 if (!names.empty()) {
9620 namedinference::propagate_names(*outputs_[output_idx], names);
9621 }
9622 // super must happen after, so that downstream can use maybe_get_output
9623 // to retrieve the output
9624 at::native::structured_hardshrink_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9625 }
9626 void set_output_raw_strided(
9627 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9628 TensorOptions options, DimnameList names
9629 ) override {
9630 outputs_[output_idx] = create_out(sizes, strides, options);
9631 if (!names.empty()) {
9632 namedinference::propagate_names(*outputs_[output_idx], names);
9633 }
9634 // super must happen after, so that downstream can use maybe_get_output
9635 // to retrieve the output
9636 at::native::structured_hardshrink_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9637 }
9638 const Tensor& maybe_get_output(int64_t output_idx) override {
9639 return *outputs_[output_idx];
9640 }
9641 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
9642};
9643at::Tensor wrapper_CPU_hardshrink_backward(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd) {
9644structured_hardshrink_backward_out_functional op;
9645op.meta(grad_out, self, lambd);
9646op.impl(grad_out, self, lambd, *op.outputs_[0]);
9647return std::move(op.outputs_[0]).take();
9648}
9649struct structured_hardshrink_backward_out_out final : public at::native::structured_hardshrink_backward_out {
9650 structured_hardshrink_backward_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
9651 void set_output_strided(
9652 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9653 TensorOptions options, DimnameList names
9654 ) override {
9655 const auto& out = outputs_[output_idx].get();
9656 resize_out(out, sizes, strides, options);
9657 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
9658 if (C10_UNLIKELY(maybe_proxy.has_value())) {
9659 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
9660 }
9661 if (!names.empty()) {
9662 namedinference::propagate_names(outputs_[output_idx], names);
9663 }
9664 // super must happen after, so that downstream can use maybe_get_output
9665 // to retrieve the output
9666 at::native::structured_hardshrink_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9667 }
9668 void set_output_raw_strided(
9669 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9670 TensorOptions options, DimnameList names
9671 ) override {
9672 const auto& out = outputs_[output_idx].get();
9673 resize_out(out, sizes, strides, options);
9674 if (!names.empty()) {
9675 namedinference::propagate_names(outputs_[output_idx], names);
9676 }
9677 // super must happen after, so that downstream can use maybe_get_output
9678 // to retrieve the output
9679 at::native::structured_hardshrink_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9680 }
9681 const Tensor& maybe_get_output(int64_t output_idx) override {
9682 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
9683 }
9684 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
9685 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
9686};
9687at::Tensor & wrapper_CPU_hardshrink_backward_out_grad_input(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input) {
9688structured_hardshrink_backward_out_out op(grad_input);
9689op.meta(grad_out, self, lambd);
9690op.impl(grad_out, self, lambd, op.maybe_get_output(0));
9691if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
9692return grad_input;
9693}
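// rsqrt and the unary/activation kernels that follow (silu, mish, sigmoid,
// sin, sinc, sinh) repeat the same functional/out/inplace triple verbatim,
// differing only in the structured_*_out base class and the arguments
// forwarded to meta()/impl(); their structured backward kernels (e.g.
// silu_backward) follow the functional/grad_input pattern used by
// gelu_backward above.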
9694struct structured_rsqrt_out_functional final : public at::native::structured_rsqrt_out {
9695 void set_output_strided(
9696 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9697 TensorOptions options, DimnameList names
9698 ) override {
9699 outputs_[output_idx] = create_out(sizes, strides, options);
9700 if (!names.empty()) {
9701 namedinference::propagate_names(*outputs_[output_idx], names);
9702 }
9703 // super must happen after, so that downstream can use maybe_get_output
9704 // to retrieve the output
9705 at::native::structured_rsqrt_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9706 }
9707 void set_output_raw_strided(
9708 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9709 TensorOptions options, DimnameList names
9710 ) override {
9711 outputs_[output_idx] = create_out(sizes, strides, options);
9712 if (!names.empty()) {
9713 namedinference::propagate_names(*outputs_[output_idx], names);
9714 }
9715 // super must happen after, so that downstream can use maybe_get_output
9716 // to retrieve the output
9717 at::native::structured_rsqrt_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9718 }
9719 const Tensor& maybe_get_output(int64_t output_idx) override {
9720 return *outputs_[output_idx];
9721 }
9722 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
9723};
9724at::Tensor wrapper_CPU_rsqrt(const at::Tensor & self) {
9725structured_rsqrt_out_functional op;
9726op.meta(self);
9727op.impl(self, *op.outputs_[0]);
9728return std::move(op.outputs_[0]).take();
9729}
9730struct structured_rsqrt_out_out final : public at::native::structured_rsqrt_out {
9731 structured_rsqrt_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
9732 void set_output_strided(
9733 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9734 TensorOptions options, DimnameList names
9735 ) override {
9736 const auto& out = outputs_[output_idx].get();
9737 resize_out(out, sizes, strides, options);
9738 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
9739 if (C10_UNLIKELY(maybe_proxy.has_value())) {
9740 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
9741 }
9742 if (!names.empty()) {
9743 namedinference::propagate_names(outputs_[output_idx], names);
9744 }
9745 // super must happen after, so that downstream can use maybe_get_output
9746 // to retrieve the output
9747 at::native::structured_rsqrt_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9748 }
9749 void set_output_raw_strided(
9750 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9751 TensorOptions options, DimnameList names
9752 ) override {
9753 const auto& out = outputs_[output_idx].get();
9754 resize_out(out, sizes, strides, options);
9755 if (!names.empty()) {
9756 namedinference::propagate_names(outputs_[output_idx], names);
9757 }
9758 // super must happen after, so that downstream can use maybe_get_output
9759 // to retrieve the output
9760 at::native::structured_rsqrt_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9761 }
9762 const Tensor& maybe_get_output(int64_t output_idx) override {
9763 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
9764 }
9765 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
9766 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
9767};
9768at::Tensor & wrapper_CPU_rsqrt_out_out(const at::Tensor & self, at::Tensor & out) {
9769structured_rsqrt_out_out op(out);
9770op.meta(self);
9771op.impl(self, op.maybe_get_output(0));
9772if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
9773return out;
9774}
9775struct structured_rsqrt_out_inplace final : public at::native::structured_rsqrt_out {
9776 structured_rsqrt_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
9777 void set_output_strided(
9778 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9779 TensorOptions options, DimnameList names
9780 ) override {
9781 const auto& out = outputs_[output_idx].get();
9782 check_inplace(out, sizes, options);
9783 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
9784 if (C10_UNLIKELY(maybe_proxy.has_value())) {
9785 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
9786 }
9787 if (!names.empty()) {
9788 namedinference::propagate_names(outputs_[output_idx], names);
9789 }
9790 // super must happen after, so that downstream can use maybe_get_output
9791 // to retrieve the output
9792 at::native::structured_rsqrt_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9793 }
9794 void set_output_raw_strided(
9795 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9796 TensorOptions options, DimnameList names
9797 ) override {
9798 const auto& out = outputs_[output_idx].get();
9799 check_inplace(out, sizes, options);
9800 if (!names.empty()) {
9801 namedinference::propagate_names(outputs_[output_idx], names);
9802 }
9803 // super must happen after, so that downstream can use maybe_get_output
9804 // to retrieve the output
9805 at::native::structured_rsqrt_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9806 }
9807 const Tensor& maybe_get_output(int64_t output_idx) override {
9808 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
9809 }
9810 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
9811 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
9812};
9813at::Tensor & wrapper_CPU_rsqrt_(at::Tensor & self) {
9814structured_rsqrt_out_inplace op(self);
9815op.meta(self);
9816op.impl(self, op.outputs_[0]);
9817if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
9818return self;
9819}
9820struct structured_silu_out_functional final : public at::native::structured_silu_out {
9821 void set_output_strided(
9822 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9823 TensorOptions options, DimnameList names
9824 ) override {
9825 outputs_[output_idx] = create_out(sizes, strides, options);
9826 if (!names.empty()) {
9827 namedinference::propagate_names(*outputs_[output_idx], names);
9828 }
9829 // super must happen after, so that downstream can use maybe_get_output
9830 // to retrieve the output
9831 at::native::structured_silu_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9832 }
9833 void set_output_raw_strided(
9834 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9835 TensorOptions options, DimnameList names
9836 ) override {
9837 outputs_[output_idx] = create_out(sizes, strides, options);
9838 if (!names.empty()) {
9839 namedinference::propagate_names(*outputs_[output_idx], names);
9840 }
9841 // super must happen after, so that downstream can use maybe_get_output
9842 // to retrieve the output
9843 at::native::structured_silu_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9844 }
9845 const Tensor& maybe_get_output(int64_t output_idx) override {
9846 return *outputs_[output_idx];
9847 }
9848 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
9849};
9850at::Tensor wrapper_CPU_silu(const at::Tensor & self) {
9851structured_silu_out_functional op;
9852op.meta(self);
9853op.impl(self, *op.outputs_[0]);
9854return std::move(op.outputs_[0]).take();
9855}
9856struct structured_silu_out_out final : public at::native::structured_silu_out {
9857 structured_silu_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
9858 void set_output_strided(
9859 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9860 TensorOptions options, DimnameList names
9861 ) override {
9862 const auto& out = outputs_[output_idx].get();
9863 resize_out(out, sizes, strides, options);
9864 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
9865 if (C10_UNLIKELY(maybe_proxy.has_value())) {
9866 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
9867 }
9868 if (!names.empty()) {
9869 namedinference::propagate_names(outputs_[output_idx], names);
9870 }
9871 // super must happen after, so that downstream can use maybe_get_output
9872 // to retrieve the output
9873 at::native::structured_silu_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9874 }
9875 void set_output_raw_strided(
9876 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9877 TensorOptions options, DimnameList names
9878 ) override {
9879 const auto& out = outputs_[output_idx].get();
9880 resize_out(out, sizes, strides, options);
9881 if (!names.empty()) {
9882 namedinference::propagate_names(outputs_[output_idx], names);
9883 }
9884 // super must happen after, so that downstream can use maybe_get_output
9885 // to retrieve the output
9886 at::native::structured_silu_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9887 }
9888 const Tensor& maybe_get_output(int64_t output_idx) override {
9889 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
9890 }
9891 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
9892 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
9893};
9894at::Tensor & wrapper_CPU_silu_out_out(const at::Tensor & self, at::Tensor & out) {
9895structured_silu_out_out op(out);
9896op.meta(self);
9897op.impl(self, op.maybe_get_output(0));
9898if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
9899return out;
9900}
9901struct structured_silu_out_inplace final : public at::native::structured_silu_out {
9902 structured_silu_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
9903 void set_output_strided(
9904 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9905 TensorOptions options, DimnameList names
9906 ) override {
9907 const auto& out = outputs_[output_idx].get();
9908 check_inplace(out, sizes, options);
9909 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
9910 if (C10_UNLIKELY(maybe_proxy.has_value())) {
9911 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
9912 }
9913 if (!names.empty()) {
9914 namedinference::propagate_names(outputs_[output_idx], names);
9915 }
9916 // super must happen after, so that downstream can use maybe_get_output
9917 // to retrieve the output
9918 at::native::structured_silu_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9919 }
9920 void set_output_raw_strided(
9921 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9922 TensorOptions options, DimnameList names
9923 ) override {
9924 const auto& out = outputs_[output_idx].get();
9925 check_inplace(out, sizes, options);
9926 if (!names.empty()) {
9927 namedinference::propagate_names(outputs_[output_idx], names);
9928 }
9929 // super must happen after, so that downstream can use maybe_get_output
9930 // to retrieve the output
9931 at::native::structured_silu_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9932 }
9933 const Tensor& maybe_get_output(int64_t output_idx) override {
9934 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
9935 }
9936 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
9937 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
9938};
9939at::Tensor & wrapper_CPU_silu_(at::Tensor & self) {
9940structured_silu_out_inplace op(self);
9941op.meta(self);
9942op.impl(self, op.outputs_[0]);
9943if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
9944return self;
9945}
9946struct structured_silu_backward_out_functional final : public at::native::structured_silu_backward_out {
9947 void set_output_strided(
9948 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9949 TensorOptions options, DimnameList names
9950 ) override {
9951 outputs_[output_idx] = create_out(sizes, strides, options);
9952 if (!names.empty()) {
9953 namedinference::propagate_names(*outputs_[output_idx], names);
9954 }
9955 // super must happen after, so that downstream can use maybe_get_output
9956 // to retrieve the output
9957 at::native::structured_silu_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9958 }
9959 void set_output_raw_strided(
9960 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9961 TensorOptions options, DimnameList names
9962 ) override {
9963 outputs_[output_idx] = create_out(sizes, strides, options);
9964 if (!names.empty()) {
9965 namedinference::propagate_names(*outputs_[output_idx], names);
9966 }
9967 // super must happen after, so that downstream can use maybe_get_output
9968 // to retrieve the output
9969 at::native::structured_silu_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
9970 }
9971 const Tensor& maybe_get_output(int64_t output_idx) override {
9972 return *outputs_[output_idx];
9973 }
9974 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
9975};
9976at::Tensor wrapper_CPU_silu_backward(const at::Tensor & grad_output, const at::Tensor & self) {
9977structured_silu_backward_out_functional op;
9978op.meta(grad_output, self);
9979op.impl(grad_output, self, *op.outputs_[0]);
9980return std::move(op.outputs_[0]).take();
9981}
9982struct structured_silu_backward_out_out final : public at::native::structured_silu_backward_out {
9983 structured_silu_backward_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
9984 void set_output_strided(
9985 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
9986 TensorOptions options, DimnameList names
9987 ) override {
9988 const auto& out = outputs_[output_idx].get();
9989 resize_out(out, sizes, strides, options);
9990 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
9991 if (C10_UNLIKELY(maybe_proxy.has_value())) {
9992 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
9993 }
9994 if (!names.empty()) {
9995 namedinference::propagate_names(outputs_[output_idx], names);
9996 }
9997 // super must happen after, so that downstream can use maybe_get_output
9998 // to retrieve the output
9999 at::native::structured_silu_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
10000 }
10001 void set_output_raw_strided(
10002 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10003 TensorOptions options, DimnameList names
10004 ) override {
10005 const auto& out = outputs_[output_idx].get();
10006 resize_out(out, sizes, strides, options);
10007 if (!names.empty()) {
10008 namedinference::propagate_names(outputs_[output_idx], names);
10009 }
10010 // super must happen after, so that downstream can use maybe_get_output
10011 // to retrieve the output
10012 at::native::structured_silu_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
10013 }
10014 const Tensor& maybe_get_output(int64_t output_idx) override {
10015 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
10016 }
10017 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
10018 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
10019};
10020at::Tensor & wrapper_CPU_silu_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) {
10021structured_silu_backward_out_out op(grad_input);
10022op.meta(grad_output, self);
10023op.impl(grad_output, self, op.maybe_get_output(0));
10024if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
10025return grad_input;
10026}
10027struct structured_mish_out_functional final : public at::native::structured_mish_out {
10028 void set_output_strided(
10029 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10030 TensorOptions options, DimnameList names
10031 ) override {
10032 outputs_[output_idx] = create_out(sizes, strides, options);
10033 if (!names.empty()) {
10034 namedinference::propagate_names(*outputs_[output_idx], names);
10035 }
10036 // super must happen after, so that downstream can use maybe_get_output
10037 // to retrieve the output
10038 at::native::structured_mish_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
10039 }
10040 void set_output_raw_strided(
10041 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10042 TensorOptions options, DimnameList names
10043 ) override {
10044 outputs_[output_idx] = create_out(sizes, strides, options);
10045 if (!names.empty()) {
10046 namedinference::propagate_names(*outputs_[output_idx], names);
10047 }
10048 // super must happen after, so that downstream can use maybe_get_output
10049 // to retrieve the output
10050 at::native::structured_mish_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
10051 }
10052 const Tensor& maybe_get_output(int64_t output_idx) override {
10053 return *outputs_[output_idx];
10054 }
10055 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
10056};
10057at::Tensor wrapper_CPU_mish(const at::Tensor & self) {
10058structured_mish_out_functional op;
10059op.meta(self);
10060op.impl(self, *op.outputs_[0]);
10061return std::move(op.outputs_[0]).take();
10062}
10063struct structured_mish_out_out final : public at::native::structured_mish_out {
10064 structured_mish_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
10065 void set_output_strided(
10066 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10067 TensorOptions options, DimnameList names
10068 ) override {
10069 const auto& out = outputs_[output_idx].get();
10070 resize_out(out, sizes, strides, options);
10071 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
10072 if (C10_UNLIKELY(maybe_proxy.has_value())) {
10073 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
10074 }
10075 if (!names.empty()) {
10076 namedinference::propagate_names(outputs_[output_idx], names);
10077 }
10078 // super must happen after, so that downstream can use maybe_get_output
10079 // to retrieve the output
10080 at::native::structured_mish_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
10081 }
10082 void set_output_raw_strided(
10083 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10084 TensorOptions options, DimnameList names
10085 ) override {
10086 const auto& out = outputs_[output_idx].get();
10087 resize_out(out, sizes, strides, options);
10088 if (!names.empty()) {
10089 namedinference::propagate_names(outputs_[output_idx], names);
10090 }
10091 // super must happen after, so that downstream can use maybe_get_output
10092 // to retrieve the output
10093 at::native::structured_mish_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
10094 }
10095 const Tensor& maybe_get_output(int64_t output_idx) override {
10096 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
10097 }
10098 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
10099 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
10100};
10101at::Tensor & wrapper_CPU_mish_out_out(const at::Tensor & self, at::Tensor & out) {
10102structured_mish_out_out op(out);
10103op.meta(self);
10104op.impl(self, op.maybe_get_output(0));
10105if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
10106return out;
10107}
10108struct structured_mish_out_inplace final : public at::native::structured_mish_out {
10109 structured_mish_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
10110 void set_output_strided(
10111 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10112 TensorOptions options, DimnameList names
10113 ) override {
10114 const auto& out = outputs_[output_idx].get();
10115 check_inplace(out, sizes, options);
10116 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
10117 if (C10_UNLIKELY(maybe_proxy.has_value())) {
10118 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
10119 }
10120 if (!names.empty()) {
10121 namedinference::propagate_names(outputs_[output_idx], names);
10122 }
10123 // super must happen after, so that downstream can use maybe_get_output
10124 // to retrieve the output
10125 at::native::structured_mish_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
10126 }
10127 void set_output_raw_strided(
10128 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10129 TensorOptions options, DimnameList names
10130 ) override {
10131 const auto& out = outputs_[output_idx].get();
10132 check_inplace(out, sizes, options);
10133 if (!names.empty()) {
10134 namedinference::propagate_names(outputs_[output_idx], names);
10135 }
10136 // super must happen after, so that downstream can use maybe_get_output
10137 // to retrieve the output
10138 at::native::structured_mish_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
10139 }
10140 const Tensor& maybe_get_output(int64_t output_idx) override {
10141 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
10142 }
10143 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
10144 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
10145};
10146at::Tensor & wrapper_CPU_mish_(at::Tensor & self) {
10147structured_mish_out_inplace op(self);
10148op.meta(self);
10149op.impl(self, op.outputs_[0]);
10150if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
10151return self;
10152}
namespace {
at::Tensor wrapper_CPU__mish_backward(const at::Tensor & grad_output, const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::mish_backward(grad_output, self);
}
} // anonymous namespace
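// mish_backward has no structured kernel, so instead of the class-based
// wrappers above the codegen emits a plain forwarding function in an anonymous
// namespace that calls the native implementation directly; as the comments
// note, no device check is performed and no DeviceGuard is set up for it.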
10160struct structured_sigmoid_out_functional final : public at::native::structured_sigmoid_out {
10161 void set_output_strided(
10162 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10163 TensorOptions options, DimnameList names
10164 ) override {
10165 outputs_[output_idx] = create_out(sizes, strides, options);
10166 if (!names.empty()) {
10167 namedinference::propagate_names(*outputs_[output_idx], names);
10168 }
10169 // super must happen after, so that downstream can use maybe_get_output
10170 // to retrieve the output
10171 at::native::structured_sigmoid_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
10172 }
10173 void set_output_raw_strided(
10174 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10175 TensorOptions options, DimnameList names
10176 ) override {
10177 outputs_[output_idx] = create_out(sizes, strides, options);
10178 if (!names.empty()) {
10179 namedinference::propagate_names(*outputs_[output_idx], names);
10180 }
10181 // super must happen after, so that downstream can use maybe_get_output
10182 // to retrieve the output
10183 at::native::structured_sigmoid_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
10184 }
10185 const Tensor& maybe_get_output(int64_t output_idx) override {
10186 return *outputs_[output_idx];
10187 }
10188 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
10189};
10190at::Tensor wrapper_CPU_sigmoid(const at::Tensor & self) {
10191structured_sigmoid_out_functional op;
10192op.meta(self);
10193op.impl(self, *op.outputs_[0]);
10194return std::move(op.outputs_[0]).take();
10195}
10196struct structured_sigmoid_out_out final : public at::native::structured_sigmoid_out {
10197 structured_sigmoid_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
10198 void set_output_strided(
10199 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10200 TensorOptions options, DimnameList names
10201 ) override {
10202 const auto& out = outputs_[output_idx].get();
10203 resize_out(out, sizes, strides, options);
10204 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
10205 if (C10_UNLIKELY(maybe_proxy.has_value())) {
10206 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
10207 }
10208 if (!names.empty()) {
10209 namedinference::propagate_names(outputs_[output_idx], names);
10210 }
10211 // super must happen after, so that downstream can use maybe_get_output
10212 // to retrieve the output
10213 at::native::structured_sigmoid_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
10214 }
10215 void set_output_raw_strided(
10216 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10217 TensorOptions options, DimnameList names
10218 ) override {
10219 const auto& out = outputs_[output_idx].get();
10220 resize_out(out, sizes, strides, options);
10221 if (!names.empty()) {
10222 namedinference::propagate_names(outputs_[output_idx], names);
10223 }
10224 // super must happen after, so that downstream can use maybe_get_output
10225 // to retrieve the output
10226 at::native::structured_sigmoid_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
10227 }
10228 const Tensor& maybe_get_output(int64_t output_idx) override {
10229 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
10230 }
10231 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
10232 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
10233};
10234at::Tensor & wrapper_CPU_sigmoid_out_out(const at::Tensor & self, at::Tensor & out) {
10235structured_sigmoid_out_out op(out);
10236op.meta(self);
10237op.impl(self, op.maybe_get_output(0));
10238if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
10239return out;
10240}
10241struct structured_sigmoid_out_inplace final : public at::native::structured_sigmoid_out {
10242 structured_sigmoid_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
10243 void set_output_strided(
10244 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10245 TensorOptions options, DimnameList names
10246 ) override {
10247 const auto& out = outputs_[output_idx].get();
10248 check_inplace(out, sizes, options);
10249 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
10250 if (C10_UNLIKELY(maybe_proxy.has_value())) {
10251 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
10252 }
10253 if (!names.empty()) {
10254 namedinference::propagate_names(outputs_[output_idx], names);
10255 }
10256 // super must happen after, so that downstream can use maybe_get_output
10257 // to retrieve the output
10258 at::native::structured_sigmoid_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
10259 }
10260 void set_output_raw_strided(
10261 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10262 TensorOptions options, DimnameList names
10263 ) override {
10264 const auto& out = outputs_[output_idx].get();
10265 check_inplace(out, sizes, options);
10266 if (!names.empty()) {
10267 namedinference::propagate_names(outputs_[output_idx], names);
10268 }
10269 // super must happen after, so that downstream can use maybe_get_output
10270 // to retrieve the output
10271 at::native::structured_sigmoid_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
10272 }
10273 const Tensor& maybe_get_output(int64_t output_idx) override {
10274 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
10275 }
10276 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
10277 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
10278};
10279at::Tensor & wrapper_CPU_sigmoid_(at::Tensor & self) {
10280structured_sigmoid_out_inplace op(self);
10281op.meta(self);
10282op.impl(self, op.outputs_[0]);
10283if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
10284return self;
10285}
namespace {
at::Tensor wrapper_CPU__logit(const at::Tensor & self, c10::optional<double> eps) {
  // No device check
  // DeviceGuard omitted
  return at::native::logit(self, eps);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_out_logit_out(const at::Tensor & self, c10::optional<double> eps, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::logit_out(self, eps, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU__logit_(at::Tensor & self, c10::optional<double> eps) {
  // No device check
  // DeviceGuard omitted
  return at::native::logit_(self, eps);
}
} // anonymous namespace
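// logit, logit_ and logit.out likewise have no structured/meta implementation
// on CPU, so all three variants are emitted as direct pass-throughs to
// at::native::logit, at::native::logit_out and at::native::logit_, again with
// no device check and no DeviceGuard.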
10307struct structured_sin_out_functional final : public at::native::structured_sin_out {
10308 void set_output_strided(
10309 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10310 TensorOptions options, DimnameList names
10311 ) override {
10312 outputs_[output_idx] = create_out(sizes, strides, options);
10313 if (!names.empty()) {
10314 namedinference::propagate_names(*outputs_[output_idx], names);
10315 }
10316 // super must happen after, so that downstream can use maybe_get_output
10317 // to retrieve the output
10318 at::native::structured_sin_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
10319 }
10320 void set_output_raw_strided(
10321 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10322 TensorOptions options, DimnameList names
10323 ) override {
10324 outputs_[output_idx] = create_out(sizes, strides, options);
10325 if (!names.empty()) {
10326 namedinference::propagate_names(*outputs_[output_idx], names);
10327 }
10328 // super must happen after, so that downstream can use maybe_get_output
10329 // to retrieve the output
10330 at::native::structured_sin_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
10331 }
10332 const Tensor& maybe_get_output(int64_t output_idx) override {
10333 return *outputs_[output_idx];
10334 }
10335 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
10336};
10337at::Tensor wrapper_CPU_sin(const at::Tensor & self) {
10338structured_sin_out_functional op;
10339op.meta(self);
10340op.impl(self, *op.outputs_[0]);
10341return std::move(op.outputs_[0]).take();
10342}
10343struct structured_sin_out_out final : public at::native::structured_sin_out {
10344 structured_sin_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
10345 void set_output_strided(
10346 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10347 TensorOptions options, DimnameList names
10348 ) override {
10349 const auto& out = outputs_[output_idx].get();
10350 resize_out(out, sizes, strides, options);
10351 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
10352 if (C10_UNLIKELY(maybe_proxy.has_value())) {
10353 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
10354 }
10355 if (!names.empty()) {
10356 namedinference::propagate_names(outputs_[output_idx], names);
10357 }
10358 // super must happen after, so that downstream can use maybe_get_output
10359 // to retrieve the output
10360 at::native::structured_sin_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
10361 }
10362 void set_output_raw_strided(
10363 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10364 TensorOptions options, DimnameList names
10365 ) override {
10366 const auto& out = outputs_[output_idx].get();
10367 resize_out(out, sizes, strides, options);
10368 if (!names.empty()) {
10369 namedinference::propagate_names(outputs_[output_idx], names);
10370 }
10371 // super must happen after, so that downstream can use maybe_get_output
10372 // to retrieve the output
10373 at::native::structured_sin_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
10374 }
10375 const Tensor& maybe_get_output(int64_t output_idx) override {
10376 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
10377 }
10378 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
10379 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
10380};
10381at::Tensor & wrapper_CPU_sin_out_out(const at::Tensor & self, at::Tensor & out) {
10382structured_sin_out_out op(out);
10383op.meta(self);
10384op.impl(self, op.maybe_get_output(0));
10385if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
10386return out;
10387}
10388struct structured_sin_out_inplace final : public at::native::structured_sin_out {
10389 structured_sin_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
10390 void set_output_strided(
10391 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10392 TensorOptions options, DimnameList names
10393 ) override {
10394 const auto& out = outputs_[output_idx].get();
10395 check_inplace(out, sizes, options);
10396 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
10397 if (C10_UNLIKELY(maybe_proxy.has_value())) {
10398 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
10399 }
10400 if (!names.empty()) {
10401 namedinference::propagate_names(outputs_[output_idx], names);
10402 }
10403 // super must happen after, so that downstream can use maybe_get_output
10404 // to retrieve the output
10405 at::native::structured_sin_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
10406 }
10407 void set_output_raw_strided(
10408 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10409 TensorOptions options, DimnameList names
10410 ) override {
10411 const auto& out = outputs_[output_idx].get();
10412 check_inplace(out, sizes, options);
10413 if (!names.empty()) {
10414 namedinference::propagate_names(outputs_[output_idx], names);
10415 }
10416 // super must happen after, so that downstream can use maybe_get_output
10417 // to retrieve the output
10418 at::native::structured_sin_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
10419 }
10420 const Tensor& maybe_get_output(int64_t output_idx) override {
10421 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
10422 }
10423 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
10424 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
10425};
10426at::Tensor & wrapper_CPU_sin_(at::Tensor & self) {
10427structured_sin_out_inplace op(self);
10428op.meta(self);
10429op.impl(self, op.outputs_[0]);
10430if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
10431return self;
10432}
10433struct structured_sinc_out_functional final : public at::native::structured_sinc_out {
10434 void set_output_strided(
10435 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10436 TensorOptions options, DimnameList names
10437 ) override {
10438 outputs_[output_idx] = create_out(sizes, strides, options);
10439 if (!names.empty()) {
10440 namedinference::propagate_names(*outputs_[output_idx], names);
10441 }
10442 // super must happen after, so that downstream can use maybe_get_output
10443 // to retrieve the output
10444 at::native::structured_sinc_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
10445 }
10446 void set_output_raw_strided(
10447 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10448 TensorOptions options, DimnameList names
10449 ) override {
10450 outputs_[output_idx] = create_out(sizes, strides, options);
10451 if (!names.empty()) {
10452 namedinference::propagate_names(*outputs_[output_idx], names);
10453 }
10454 // super must happen after, so that downstream can use maybe_get_output
10455 // to retrieve the output
10456 at::native::structured_sinc_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
10457 }
10458 const Tensor& maybe_get_output(int64_t output_idx) override {
10459 return *outputs_[output_idx];
10460 }
10461 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
10462};
10463at::Tensor wrapper_CPU_sinc(const at::Tensor & self) {
10464structured_sinc_out_functional op;
10465op.meta(self);
10466op.impl(self, *op.outputs_[0]);
10467return std::move(op.outputs_[0]).take();
10468}
10469struct structured_sinc_out_out final : public at::native::structured_sinc_out {
10470 structured_sinc_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
10471 void set_output_strided(
10472 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10473 TensorOptions options, DimnameList names
10474 ) override {
10475 const auto& out = outputs_[output_idx].get();
10476 resize_out(out, sizes, strides, options);
10477 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
10478 if (C10_UNLIKELY(maybe_proxy.has_value())) {
10479 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
10480 }
10481 if (!names.empty()) {
10482 namedinference::propagate_names(outputs_[output_idx], names);
10483 }
10484 // super must happen after, so that downstream can use maybe_get_output
10485 // to retrieve the output
10486 at::native::structured_sinc_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
10487 }
10488 void set_output_raw_strided(
10489 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10490 TensorOptions options, DimnameList names
10491 ) override {
10492 const auto& out = outputs_[output_idx].get();
10493 resize_out(out, sizes, strides, options);
10494 if (!names.empty()) {
10495 namedinference::propagate_names(outputs_[output_idx], names);
10496 }
10497 // super must happen after, so that downstream can use maybe_get_output
10498 // to retrieve the output
10499 at::native::structured_sinc_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
10500 }
10501 const Tensor& maybe_get_output(int64_t output_idx) override {
10502 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
10503 }
10504 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
10505 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
10506};
10507at::Tensor & wrapper_CPU_sinc_out_out(const at::Tensor & self, at::Tensor & out) {
10508structured_sinc_out_out op(out);
10509op.meta(self);
10510op.impl(self, op.maybe_get_output(0));
10511if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
10512return out;
10513}
10514struct structured_sinc_out_inplace final : public at::native::structured_sinc_out {
10515 structured_sinc_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
10516 void set_output_strided(
10517 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10518 TensorOptions options, DimnameList names
10519 ) override {
10520 const auto& out = outputs_[output_idx].get();
10521 check_inplace(out, sizes, options);
10522 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
10523 if (C10_UNLIKELY(maybe_proxy.has_value())) {
10524 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
10525 }
10526 if (!names.empty()) {
10527 namedinference::propagate_names(outputs_[output_idx], names);
10528 }
10529 // super must happen after, so that downstream can use maybe_get_output
10530 // to retrieve the output
10531 at::native::structured_sinc_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
10532 }
10533 void set_output_raw_strided(
10534 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10535 TensorOptions options, DimnameList names
10536 ) override {
10537 const auto& out = outputs_[output_idx].get();
10538 check_inplace(out, sizes, options);
10539 if (!names.empty()) {
10540 namedinference::propagate_names(outputs_[output_idx], names);
10541 }
10542 // super must happen after, so that downstream can use maybe_get_output
10543 // to retrieve the output
10544 at::native::structured_sinc_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
10545 }
10546 const Tensor& maybe_get_output(int64_t output_idx) override {
10547 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
10548 }
10549 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
10550 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
10551};
10552at::Tensor & wrapper_CPU_sinc_(at::Tensor & self) {
10553structured_sinc_out_inplace op(self);
10554op.meta(self);
10555op.impl(self, op.outputs_[0]);
10556if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
10557return self;
10558}
10559struct structured_sinh_out_functional final : public at::native::structured_sinh_out {
10560 void set_output_strided(
10561 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10562 TensorOptions options, DimnameList names
10563 ) override {
10564 outputs_[output_idx] = create_out(sizes, strides, options);
10565 if (!names.empty()) {
10566 namedinference::propagate_names(*outputs_[output_idx], names);
10567 }
10568 // super must happen after, so that downstream can use maybe_get_output
10569 // to retrieve the output
10570 at::native::structured_sinh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
10571 }
10572 void set_output_raw_strided(
10573 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
10574 TensorOptions options, DimnameList names
10575 ) override {
10576 outputs_[output_idx] = create_out(sizes, strides, options);
10577 if (!names.empty()) {
10578 namedinference::propagate_names(*outputs_[output_idx], names);
10579 }
10580 // super must happen after, so that downstream can use maybe_get_output
10581 // to retrieve the output
10582 at::native::structured_sinh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
10583 }
10584 const Tensor& maybe_get_output(int64_t output_idx) override {
10585 return *outputs_[output_idx];
10586 }
10587 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
10588};
10589at::Tensor wrapper_CPU_sinh(const at::Tensor & self) {
10590structured_sinh_out_functional op;
10591op.meta(self);
10592op.impl(self, *op.outputs_[0]);
10593return std::move(op.outputs_[0]).take();
10594}
struct structured_sinh_out_out final : public at::native::structured_sinh_out {
    structured_sinh_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sinh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sinh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_sinh_out_out(const at::Tensor & self, at::Tensor & out) {
  structured_sinh_out_out op(out);
  op.meta(self);
  op.impl(self, op.maybe_get_output(0));
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return out;
}
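// In the `out=` path, `resize_out` adjusts the caller's tensor to the computed geometry and
// `maybe_create_proxy` may substitute a temporary when `out` cannot be written to directly
// (the exact criteria live in `maybe_create_proxy`); when a proxy was used, the wrapper
// copies it back into `out` after `impl` has run.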
struct structured_sinh_out_inplace final : public at::native::structured_sinh_out {
    structured_sinh_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sinh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sinh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_sinh_(at::Tensor & self) {
  structured_sinh_out_inplace op(self);
  op.meta(self);
  op.impl(self, op.outputs_[0]);
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return self;
}
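// The in-place path differs only in that `check_inplace` validates that `self` already has
// the expected sizes and options; `self` then doubles as the output tensor, with the same
// proxy copy-back as in the `out=` case.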
struct structured_softmax_cpu_out_functional final : public at::native::structured_softmax_cpu_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return *outputs_[output_idx];
    }
    std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
};
at::Tensor wrapper_CPU__softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
  structured_softmax_cpu_out_functional op;
  op.meta(self, dim, half_to_float);
  op.impl(self, dim, half_to_float, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
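// The softmax wrappers thread the extra `dim` and `half_to_float` arguments through both
// `meta` and `impl`. Unlike the unary kernels above, these `set_output_*` overrides do not
// forward to a base-class `set_output_raw_strided` hook, presumably because the corresponding
// meta function does not define one.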
struct structured_softmax_cpu_out_out final : public at::native::structured_softmax_cpu_out {
    structured_softmax_cpu_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU__softmax_out_out(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
  structured_softmax_cpu_out_out op(out);
  op.meta(self, dim, half_to_float);
  op.impl(self, dim, half_to_float, op.maybe_get_output(0));
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return out;
}
struct structured_softmax_backward_cpu_out_functional final : public at::native::structured_softmax_backward_cpu_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return *outputs_[output_idx];
    }
    std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
};
at::Tensor wrapper_CPU__softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
  structured_softmax_backward_cpu_out_functional op;
  op.meta(grad_output, output, dim, input_dtype);
  op.impl(grad_output, output, dim, input_dtype, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
struct structured_softmax_backward_cpu_out_out final : public at::native::structured_softmax_backward_cpu_out {
    structured_softmax_backward_cpu_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU__softmax_backward_data_out_out(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & grad_input) {
  structured_softmax_backward_cpu_out_out op(grad_input);
  op.meta(grad_output, output, dim, input_dtype);
  op.impl(grad_output, output, dim, input_dtype, op.maybe_get_output(0));
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return grad_input;
}
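// For `_softmax_backward_data.out` the out-argument is named `grad_input` and is bound as
// output 0 of the structured op; this is the kernel the autograd engine is expected to hit
// for the softmax backward pass (an assumption based on the operator name, not on this file).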
namespace {
at::Tensor & wrapper_CPU_out_sspaddmm_out(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sspaddmm_out_only_sparse(self, mat1, mat2, beta, alpha, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU___stack(at::TensorList tensors, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::_stack_cpu(tensors, dim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_out__stack_out(at::TensorList tensors, int64_t dim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::_stack_out_cpu(tensors, dim, out);
}
} // anonymous namespace
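// Operators without a structured kernel (here `_sspaddmm_out_only_sparse`, `_stack_cpu` and
// `_stack_out_cpu`) get plain pass-through wrappers in an anonymous namespace; the
// "No device check" / "DeviceGuard omitted" comments record that codegen skips those guards
// for these ops.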
struct structured_sum_out_functional final : public at::native::structured_sum_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return *outputs_[output_idx];
    }
    std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
};
at::Tensor wrapper_CPU_sum_dim_IntList(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  structured_sum_out_functional op;
  op.meta(self, dim, keepdim, dtype);
  op.impl(self, dim, keepdim, dtype, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
struct structured_sum_out_out final : public at::native::structured_sum_out {
    structured_sum_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_sum_out_IntList_out(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
  structured_sum_out_out op(out);
  op.meta(self, dim, keepdim, dtype);
  op.impl(self, dim, keepdim, dtype, op.maybe_get_output(0));
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return out;
}
namespace {
at::Tensor wrapper_CPU__nansum(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::nansum(self, dim, keepdim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_out_nansum_out(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::nansum_out(self, dim, keepdim, dtype, out);
}
} // anonymous namespace
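// `sum.dim_IntList` takes the structured path above, whereas `nansum` is still a plain
// pass-through to `at::native::nansum` / `at::native::nansum_out`.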
struct structured_sqrt_out_functional final : public at::native::structured_sqrt_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sqrt_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sqrt_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return *outputs_[output_idx];
    }
    std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
};
at::Tensor wrapper_CPU_sqrt(const at::Tensor & self) {
  structured_sqrt_out_functional op;
  op.meta(self);
  op.impl(self, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
struct structured_sqrt_out_out final : public at::native::structured_sqrt_out {
    structured_sqrt_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sqrt_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sqrt_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_sqrt_out_out(const at::Tensor & self, at::Tensor & out) {
  structured_sqrt_out_out op(out);
  op.meta(self);
  op.impl(self, op.maybe_get_output(0));
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return out;
}
struct structured_sqrt_out_inplace final : public at::native::structured_sqrt_out {
    structured_sqrt_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sqrt_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sqrt_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_sqrt_(at::Tensor & self) {
  structured_sqrt_out_inplace op(self);
  op.meta(self);
  op.impl(self, op.outputs_[0]);
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return self;
}
namespace {
at::Tensor wrapper_CPU_correction_std(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::std(self, dim, correction, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_correction_out_std_out(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::std_out(self, dim, correction, keepdim, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_correction_std_mean(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::std_mean(self, dim, correction, keepdim);
}
} // anonymous namespace
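// `std`, `std.out` and `std_mean` take `correction` as a `c10::optional<int64_t>`; a value
// of 1 corresponds to the usual Bessel-corrected (unbiased) estimator.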
namespace {
at::Tensor wrapper_CPU__prod(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::prod(self, dtype);
}
} // anonymous namespace
struct structured_prod_out_functional final : public at::native::structured_prod_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return *outputs_[output_idx];
    }
    std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
};
at::Tensor wrapper_CPU_prod_dim_int(const at::Tensor & self, int64_t dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  structured_prod_out_functional op;
  op.meta(self, dim, keepdim, dtype);
  op.impl(self, dim, keepdim, dtype, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
struct structured_prod_out_out final : public at::native::structured_prod_out {
    structured_prod_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_prod_out_int_out(const at::Tensor & self, int64_t dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
  structured_prod_out_out op(out);
  op.meta(self, dim, keepdim, dtype);
  op.impl(self, dim, keepdim, dtype, op.maybe_get_output(0));
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return out;
}
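// `prod` appears twice: the full reduction is a pass-through wrapper above, while
// `prod.dim_int` is structured and reduces over a single dimension, forwarding `keepdim`
// and the optional `dtype` through `meta`/`impl`.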
struct structured_tan_out_functional final : public at::native::structured_tan_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_tan_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_tan_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return *outputs_[output_idx];
    }
    std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
};
at::Tensor wrapper_CPU_tan(const at::Tensor & self) {
  structured_tan_out_functional op;
  op.meta(self);
  op.impl(self, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
struct structured_tan_out_out final : public at::native::structured_tan_out {
    structured_tan_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_tan_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_tan_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_tan_out_out(const at::Tensor & self, at::Tensor & out) {
  structured_tan_out_out op(out);
  op.meta(self);
  op.impl(self, op.maybe_get_output(0));
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return out;
}
struct structured_tan_out_inplace final : public at::native::structured_tan_out {
    structured_tan_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_tan_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_tan_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_tan_(at::Tensor & self) {
  structured_tan_out_inplace op(self);
  op.meta(self);
  op.impl(self, op.outputs_[0]);
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return self;
}
struct structured_tanh_out_functional final : public at::native::structured_tanh_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_tanh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_tanh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return *outputs_[output_idx];
    }
    std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
};
at::Tensor wrapper_CPU_tanh(const at::Tensor & self) {
  structured_tanh_out_functional op;
  op.meta(self);
  op.impl(self, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
struct structured_tanh_out_out final : public at::native::structured_tanh_out {
    structured_tanh_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_tanh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_tanh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_tanh_out_out(const at::Tensor & self, at::Tensor & out) {
  structured_tanh_out_out op(out);
  op.meta(self);
  op.impl(self, op.maybe_get_output(0));
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return out;
}
struct structured_tanh_out_inplace final : public at::native::structured_tanh_out {
    structured_tanh_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_tanh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_tanh_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_tanh_(at::Tensor & self) {
  structured_tanh_out_inplace op(self);
  op.meta(self);
  op.impl(self, op.outputs_[0]);
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return self;
}
namespace {
at::Tensor & wrapper_CPU_out_tensordot_out(const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::tensordot_out(self, other, dims_self, dims_other, out);
}
} // anonymous namespace
struct structured_threshold_out_functional final : public at::native::structured_threshold_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_threshold_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_threshold_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return *outputs_[output_idx];
    }
    std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
};
at::Tensor wrapper_CPU_threshold(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
  structured_threshold_out_functional op;
  op.meta(self, threshold, value);
  op.impl(self, threshold, value, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
struct structured_threshold_out_out final : public at::native::structured_threshold_out {
    structured_threshold_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_threshold_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_threshold_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_threshold_out_out(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value, at::Tensor & out) {
  structured_threshold_out_out op(out);
  op.meta(self, threshold, value);
  op.impl(self, threshold, value, op.maybe_get_output(0));
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return out;
}
struct structured_threshold_out_inplace final : public at::native::structured_threshold_out {
    structured_threshold_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_threshold_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_threshold_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_threshold_(at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
  structured_threshold_out_inplace op(self);
  op.meta(self, threshold, value);
  op.impl(self, threshold, value, op.outputs_[0]);
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return self;
}
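// `threshold` forwards its two Scalar arguments through `meta` and `impl`; per the PyTorch
// docs, elements less than or equal to `threshold` are replaced by `value` and everything
// else is passed through unchanged.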
struct structured_threshold_backward_out_functional final : public at::native::structured_threshold_backward_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_threshold_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_threshold_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return *outputs_[output_idx];
    }
    std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
};
at::Tensor wrapper_CPU_threshold_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
  structured_threshold_backward_out_functional op;
  op.meta(grad_output, self, threshold);
  op.impl(grad_output, self, threshold, *op.outputs_[0]);
  return std::move(op.outputs_[0]).take();
}
struct structured_threshold_backward_out_out final : public at::native::structured_threshold_backward_out {
    structured_threshold_backward_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_threshold_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_threshold_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_threshold_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input) {
  structured_threshold_backward_out_out op(grad_input);
  op.meta(grad_output, self, threshold);
  op.impl(grad_output, self, threshold, op.maybe_get_output(0));
  if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
  return grad_input;
}
namespace {
at::Tensor wrapper_CPU__flip(const at::Tensor & self, at::IntArrayRef dims) {
  // No device check
  // DeviceGuard omitted
  return at::native::flip(self, dims);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU__roll(const at::Tensor & self, at::IntArrayRef shifts, at::IntArrayRef dims) {
  // No device check
  // DeviceGuard omitted
  return at::native::roll_cpu(self, shifts, dims);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU___transform_bias_rescale_qkv(const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads) {
  // No device check
  // DeviceGuard omitted
  return at::native::transform_bias_rescale_qkv_cpu(qkv, qkv_bias, num_heads);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU___nested_tensor_from_mask(const at::Tensor & t, const at::Tensor & mask, bool mask_check) {
  // No device check
  // DeviceGuard omitted
  return at::native::NestedTensor_nested_tensor_from_mask(t, mask, mask_check);
}
} // anonymous namespace
namespace {
bool wrapper_CPU___nested_tensor_from_mask_left_aligned(const at::Tensor & t, const at::Tensor & mask) {
  // No device check
  // DeviceGuard omitted
  return at::native::NestedTensor_nested_tensor_from_mask_left_aligned(t, mask);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU___nested_from_padded(const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213) {
  // No device check
  // DeviceGuard omitted
  return at::native::nested_from_padded_generic(padded, cpu_nested_shape_example, fuse_transform_0213);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU___nested_view_from_buffer(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, at::IntArrayRef offsets) {
  // No device check
  // DeviceGuard omitted
  return at::native::_nested_view_from_buffer(self, nested_size, nested_strides, offsets);
}
} // anonymous namespace
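// The nested-tensor helpers above (`_nested_tensor_from_mask`, `_nested_from_padded`,
// `_nested_view_from_buffer`, ...) are all unstructured pass-throughs to their `at::native`
// NestedTensor implementations.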
11697struct structured_trunc_out_functional final : public at::native::structured_trunc_out {
11698 void set_output_strided(
11699 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11700 TensorOptions options, DimnameList names
11701 ) override {
11702 outputs_[output_idx] = create_out(sizes, strides, options);
11703 if (!names.empty()) {
11704 namedinference::propagate_names(*outputs_[output_idx], names);
11705 }
11706 // super must happen after, so that downstream can use maybe_get_output
11707 // to retrieve the output
11708 at::native::structured_trunc_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
11709 }
11710 void set_output_raw_strided(
11711 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
11712 TensorOptions options, DimnameList names
11713 ) override {
11714 outputs_[output_idx] = create_out(sizes, strides, options);
11715 if (!names.empty()) {
11716 namedinference::propagate_names(*outputs_[output_idx], names);
11717 }
11718 // super must happen after, so that downstream can use maybe_get_output
11719 // to retrieve the output
11720 at::native::structured_trunc_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
11721 }
11722 const Tensor& maybe_get_output(int64_t output_idx) override {
11723 return *outputs_[output_idx];
11724 }
11725 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
11726};
11727at::Tensor wrapper_CPU_trunc(const at::Tensor & self) {
11728structured_trunc_out_functional op;
11729op.meta(self);
11730op.impl(self, *op.outputs_[0]);
11731return std::move(op.outputs_[0]).take();
11732}
struct structured_trunc_out_out final : public at::native::structured_trunc_out {
    structured_trunc_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_trunc_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_trunc_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_trunc_out_out(const at::Tensor & self, at::Tensor & out) {
structured_trunc_out_out op(out);
op.meta(self);
op.impl(self, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return out;
}
struct structured_trunc_out_inplace final : public at::native::structured_trunc_out {
    structured_trunc_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_trunc_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_trunc_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_trunc_(at::Tensor & self) {
structured_trunc_out_inplace op(self);
op.meta(self);
op.impl(self, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
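// The three trunc wrappers above show the pattern the generator emits for every structured
// kernel: the *_functional struct allocates a fresh output via create_out(), the *_out struct
// resizes the caller-supplied tensor (falling back to a temporary "proxy" tensor that is
// copied back when the supplied tensor cannot be written directly), and the *_inplace struct
// only verifies, via check_inplace(), that `self` already has the expected size and options.
// In each wrapper, op.meta() runs shape/dtype inference and op.impl() runs the kernel body.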
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU___unique(const at::Tensor & self, bool sorted, bool return_inverse) {
  // No device check
  // DeviceGuard omitted
  return at::native::_unique_cpu(self, sorted, return_inverse);
}
} // anonymous namespace
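// Wrappers in anonymous namespaces like the one above are the unstructured case: they forward
// directly to a hand-written kernel under at::native. The "No device check" / "DeviceGuard
// omitted" comments record that no common-device check is performed and no device guard is
// set up before the call for this CPU registration.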
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU__unique_dim(const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts) {
  // No device check
  // DeviceGuard omitted
  return at::native::unique_dim_cpu(self, dim, sorted, return_inverse, return_counts);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU__unique_consecutive(const at::Tensor & self, bool return_inverse, bool return_counts, c10::optional<int64_t> dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::unique_consecutive_cpu(self, return_inverse, return_counts, dim);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU__unique_dim_consecutive(const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts) {
  // No device check
  // DeviceGuard omitted
  return at::native::unique_dim_consecutive_cpu(self, dim, return_inverse, return_counts);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU___unique2(const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts) {
  // No device check
  // DeviceGuard omitted
  return at::native::_unique2_cpu(self, sorted, return_inverse, return_counts);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU_correction_var(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::var(self, dim, correction, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_correction_out_var_out(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::var_out(self, dim, correction, keepdim, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_correction_var_mean(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::var_mean(self, dim, correction, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU_self_where(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::where(condition, self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_self_out_where_out(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::where_self_out(condition, self, other, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU___weight_norm_interface(const at::Tensor & v, const at::Tensor & g, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::weight_norm_cpu(v, g, dim);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU___weight_norm_interface_backward(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::weight_norm_backward_cpu(grad_w, saved_v, saved_g, saved_norms, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU___efficientzerotensor(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  // No device check
  // DeviceGuard omitted
  return at::native::_efficientzerotensor(size, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU___standard_gamma_grad(const at::Tensor & self, const at::Tensor & output) {
  // No device check
  // DeviceGuard omitted
  return at::native::_standard_gamma_grad_cpu(self, output);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU___standard_gamma(const at::Tensor & self, c10::optional<at::Generator> generator) {
  // No device check
  // DeviceGuard omitted
  return at::native::_s_gamma_cpu(self, generator);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU___dirichlet_grad(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total) {
  // No device check
  // DeviceGuard omitted
  return at::native::_dirichlet_grad_cpu(x, alpha, total);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU___sample_dirichlet(const at::Tensor & self, c10::optional<at::Generator> generator) {
  // No device check
  // DeviceGuard omitted
  return at::native::_s_dirichlet_cpu(self, generator);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU__poisson(const at::Tensor & self, c10::optional<at::Generator> generator) {
  // No device check
  // DeviceGuard omitted
  return at::native::_s_poisson_cpu(self, generator);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU__binomial(const at::Tensor & count, const at::Tensor & prob, c10::optional<at::Generator> generator) {
  // No device check
  // DeviceGuard omitted
  return at::native::_s_binomial_cpu(count, prob, generator);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU___spdiags(const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, c10::optional<at::Layout> layout) {
  // No device check
  // DeviceGuard omitted
  return at::native::spdiags(diagonals, offsets, shape, layout);
}
} // anonymous namespace
struct structured_norm_dtype_out_functional final : public at::native::structured_norm_dtype_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return *outputs_[output_idx];
    }
    std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
};
at::Tensor wrapper_CPU_norm_ScalarOpt_dim_dtype(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) {
structured_norm_dtype_out_functional op;
op.meta(self, (p.has_value() ? at::OptionalScalarRef(&(p.value())) : at::OptionalScalarRef()), dim, keepdim, dtype);
op.impl(self, (p.has_value() ? at::OptionalScalarRef(&(p.value())) : at::OptionalScalarRef()), dim, keepdim, dtype, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
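// The optional Scalar `p` is handed to meta()/impl() as an at::OptionalScalarRef: a non-owning
// reference to p.value() when the caller supplied a value, or an empty OptionalScalarRef when
// it was omitted. The same conversion is repeated for every norm overload below.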
struct structured_norm_dtype_out_out final : public at::native::structured_norm_dtype_out {
    structured_norm_dtype_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_norm_out_dtype_out(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype, at::Tensor & out) {
structured_norm_dtype_out_out op(out);
op.meta(self, (p.has_value() ? at::OptionalScalarRef(&(p.value())) : at::OptionalScalarRef()), dim, keepdim, dtype);
op.impl(self, (p.has_value() ? at::OptionalScalarRef(&(p.value())) : at::OptionalScalarRef()), dim, keepdim, dtype, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return out;
}
struct structured_norm_out_functional final : public at::native::structured_norm_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return *outputs_[output_idx];
    }
    std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
};
at::Tensor wrapper_CPU_norm_ScalarOpt_dim(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim) {
structured_norm_out_functional op;
op.meta(self, (p.has_value() ? at::OptionalScalarRef(&(p.value())) : at::OptionalScalarRef()), dim, keepdim);
op.impl(self, (p.has_value() ? at::OptionalScalarRef(&(p.value())) : at::OptionalScalarRef()), dim, keepdim, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_norm_out_out final : public at::native::structured_norm_out {
    structured_norm_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_norm_out_out(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
structured_norm_out_out op(out);
op.meta(self, (p.has_value() ? at::OptionalScalarRef(&(p.value())) : at::OptionalScalarRef()), dim, keepdim);
op.impl(self, (p.has_value() ? at::OptionalScalarRef(&(p.value())) : at::OptionalScalarRef()), dim, keepdim, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return out;
}
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_Tensor_out_frexp_out(const at::Tensor & self, at::Tensor & mantissa, at::Tensor & exponent) {
  // No device check
  // DeviceGuard omitted
  return at::native::frexp_out(self, mantissa, exponent);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU__zero_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::zero_(self);
}
} // anonymous namespace
struct structured_sub_out_functional final : public at::native::structured_sub_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sub_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sub_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return *outputs_[output_idx];
    }
    std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
};
at::Tensor wrapper_CPU_sub_Tensor(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
structured_sub_out_functional op;
op.meta(self, other, alpha);
op.impl(self, other, alpha, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_sub_out_out final : public at::native::structured_sub_out {
    structured_sub_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sub_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sub_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_sub_out_out(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
structured_sub_out_out op(out);
op.meta(self, other, alpha);
op.impl(self, other, alpha, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return out;
}
struct structured_sub_out_inplace final : public at::native::structured_sub_out {
    structured_sub_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sub_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sub_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_sub__Tensor(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
structured_sub_out_inplace op(self);
op.meta(self, other, alpha);
op.impl(self, other, alpha, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
namespace {
at::Tensor wrapper_CPU_Tensor_rsub(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  // No device check
  // DeviceGuard omitted
  return at::native::rsub(self, other, alpha);
}
} // anonymous namespace
struct structured_heaviside_out_functional final : public at::native::structured_heaviside_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_heaviside_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_heaviside_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return *outputs_[output_idx];
    }
    std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
};
at::Tensor wrapper_CPU_heaviside(const at::Tensor & self, const at::Tensor & values) {
structured_heaviside_out_functional op;
op.meta(self, values);
op.impl(self, values, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_heaviside_out_out final : public at::native::structured_heaviside_out {
    structured_heaviside_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_heaviside_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_heaviside_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_heaviside_out_out(const at::Tensor & self, const at::Tensor & values, at::Tensor & out) {
structured_heaviside_out_out op(out);
op.meta(self, values);
op.impl(self, values, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return out;
}
struct structured_heaviside_out_inplace final : public at::native::structured_heaviside_out {
    structured_heaviside_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_heaviside_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_heaviside_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_heaviside_(at::Tensor & self, const at::Tensor & values) {
structured_heaviside_out_inplace op(self);
op.meta(self, values);
op.impl(self, values, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
struct structured_addmm_out_cpu_functional final : public at::native::structured_addmm_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return *outputs_[output_idx];
    }
    std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
};
at::Tensor wrapper_CPU_addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
structured_addmm_out_cpu_functional op;
op.meta(self, mat1, mat2, beta, alpha);
op.impl(self, mat1, mat2, beta, alpha, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_addmm_out_cpu_out final : public at::native::structured_addmm_out_cpu {
    structured_addmm_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_addmm_out_out(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
structured_addmm_out_cpu_out op(out);
op.meta(self, mat1, mat2, beta, alpha);
op.impl(self, mat1, mat2, beta, alpha, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return out;
}
struct structured_addmm_out_cpu_inplace final : public at::native::structured_addmm_out_cpu {
    structured_addmm_out_cpu_inplace(Tensor& self) : outputs_{std::ref(self)} {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        check_inplace(out, sizes, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_addmm_(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
structured_addmm_out_cpu_inplace op(self);
op.meta(self, mat1, mat2, beta, alpha);
op.impl(self, mat1, mat2, beta, alpha, op.outputs_[0]);
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return self;
}
struct structured_addmm_activation_out_cpu_functional final : public at::native::structured_addmm_activation_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return *outputs_[output_idx];
    }
    std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
};
at::Tensor wrapper_CPU__addmm_activation(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu) {
structured_addmm_activation_out_cpu_functional op;
op.meta(self, mat1, mat2, beta, alpha, use_gelu);
op.impl(self, mat1, mat2, beta, alpha, use_gelu, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
struct structured_addmm_activation_out_cpu_out final : public at::native::structured_addmm_activation_out_cpu {
    structured_addmm_activation_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
        }
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU__addmm_activation_out_out(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu, at::Tensor & out) {
structured_addmm_activation_out_cpu_out op(out);
op.meta(self, mat1, mat2, beta, alpha, use_gelu);
op.impl(self, mat1, mat2, beta, alpha, use_gelu, op.maybe_get_output(0));
if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
return out;
}
namespace {
int64_t wrapper_CPU__sparse_dim(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::sparse_dim_strided(self);
}
} // anonymous namespace
namespace {
int64_t wrapper_CPU__dense_dim(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::dense_dim_strided(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU_sparse_dim_to_sparse(const at::Tensor & self, int64_t sparse_dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::dense_to_sparse(self, sparse_dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU__to_sparse(const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::dense_to_sparse(self, layout, blocksize, dense_dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU__to_sparse_csr(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::dense_to_sparse_csr(self, dense_dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU__to_sparse_csc(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::dense_to_sparse_csc(self, dense_dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU__to_sparse_bsr(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::dense_to_sparse_bsr(self, blocksize, dense_dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU__to_sparse_bsc(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::dense_to_sparse_bsc(self, blocksize, dense_dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU__to_mkldnn(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::dense_to_mkldnn(self, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU__quantize_per_tensor_dynamic(const at::Tensor & self, at::ScalarType dtype, bool reduce_range) {
  // No device check
  // DeviceGuard omitted
  return at::native::quantize_per_tensor_dynamic(self, dtype, reduce_range);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU__quantize_per_tensor(const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::quantize_per_tensor(self, scale, zero_point, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU_tensor_qparams_quantize_per_tensor(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::quantize_per_tensor_tensor_qparams(self, scale, zero_point, dtype);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CPU_tensors_quantize_per_tensor(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::quantize_per_tensor_list_cpu(tensors, scales, zero_points, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU__quantize_per_channel(const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::quantize_per_channel(self, scales, zero_points, axis, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU_self_dequantize(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::dequantize_cpu_or_cuda(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU___make_per_tensor_quantized_tensor(const at::Tensor & self, double scale, int64_t zero_point) {
  // No device check
  // DeviceGuard omitted
  return at::native::make_per_tensor_quantized_tensor_cpu(self, scale, zero_point);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU___make_per_channel_quantized_tensor(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis) {
  // No device check
  // DeviceGuard omitted
  return at::native::make_per_channel_quantized_tensor_cpu(self, scale, zero_point, axis);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU__fake_quantize_per_tensor_affine_cachemask(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
  // No device check
  // DeviceGuard omitted
  return at::native::fake_quantize_per_tensor_affine_cachemask(self, scale, zero_point, quant_min, quant_max);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU___fake_quantize_per_tensor_affine_cachemask_tensor_qparams(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max) {
  // No device check
  // DeviceGuard omitted
  return at::native::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(self, scale, zero_point, fake_quant_enabled, quant_min, quant_max);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU___fake_quantize_learnable_per_tensor_affine(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) {
  // No device check
  // DeviceGuard omitted
  return at::native::_fake_quantize_learnable_per_tensor_affine(self, scale, zero_point, quant_min, quant_max, grad_factor);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU___fake_quantize_learnable_per_tensor_affine_backward(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) {
  // No device check
  // DeviceGuard omitted
  return at::native::_fake_quantize_learnable_per_tensor_affine_backward(grad, self, scale, zero_point, quant_min, quant_max, grad_factor);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU__fake_quantize_per_channel_affine_cachemask(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
  // No device check
  // DeviceGuard omitted
  return at::native::fake_quantize_per_channel_affine_cachemask(self, scale, zero_point, axis, quant_min, quant_max);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU___fake_quantize_learnable_per_channel_affine(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) {
  // No device check
  // DeviceGuard omitted
  return at::native::_fake_quantize_learnable_per_channel_affine(self, scale, zero_point, axis, quant_min, quant_max, grad_factor);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU___fake_quantize_learnable_per_channel_affine_backward(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) {
  // No device check
  // DeviceGuard omitted
  return at::native::_fake_quantize_learnable_per_channel_affine_backward(grad, self, scale, zero_point, axis, quant_min, quant_max, grad_factor);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CPU___fused_moving_avg_obs_fq_helper(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) {
  // No device check
  // DeviceGuard omitted
  return at::native::fused_moving_avg_obs_fake_quant_cpu(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
}
} // anonymous namespace
namespace {
at::Scalar wrapper_CPU___local_scalar_dense(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::_local_scalar_dense_cpu(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_source_Storage_set_(at::Tensor & self, at::Storage source) {
  // No device check
  // DeviceGuard omitted
  return at::native::set_(self, source);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_source_Storage_storage_offset_set_(at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
  // No device check
  // DeviceGuard omitted
  return at::native::set_storage_cpu_(self, source, storage_offset.expect_int(), C10_AS_INTARRAYREF_SLOW(size), C10_AS_INTARRAYREF_SLOW(stride));
}
} // anonymous namespace
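// set_storage_cpu_ takes concrete integers, so the SymInt arguments are lowered at the call
// site: storage_offset.expect_int() asserts the offset is not symbolic, and
// C10_AS_INTARRAYREF_SLOW materializes the SymIntArrayRef sizes/strides as plain IntArrayRef.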
namespace {
at::Tensor & wrapper_CPU_source_Tensor_set_(at::Tensor & self, const at::Tensor & source) {
  // No device check
  // DeviceGuard omitted
  return at::native::set_tensor_(self, source);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU__set_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::set_cpu_(self);
}
} // anonymous namespace
namespace {
bool wrapper_CPU__is_set_to(const at::Tensor & self, const at::Tensor & tensor) {
  // No device check
  // DeviceGuard omitted
  return at::native::is_set_to(self, tensor);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_Scalar_masked_fill_(at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
  // No device check
  // DeviceGuard omitted
  return at::native::masked_fill__cpu(self, mask, value);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU_Tensor_masked_fill_(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
  // No device check
  // DeviceGuard omitted
  return at::native::masked_fill__cpu(self, mask, value);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU__masked_scatter_(at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) {
  // No device check
  // DeviceGuard omitted
  return at::native::masked_scatter__cpu(self, mask, source);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU___masked_softmax(const at::Tensor & self, const at::Tensor & mask, c10::optional<int64_t> dim, c10::optional<int64_t> mask_type) {
  // No device check
  // DeviceGuard omitted
  return at::native::masked_softmax_cpu(self, mask, dim, mask_type);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU___masked_softmax_backward(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::masked_softmax_backward_cpu(grad_output, output, mask, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CPU__view(const at::Tensor & self, c10::SymIntArrayRef size) {
  // No device check
  // DeviceGuard omitted
  return at::native::view(self, C10_AS_INTARRAYREF_SLOW(size));
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CPU__put_(at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) {
  // No device check
  // DeviceGuard omitted
  return at::native::put_(self, index, source, accumulate);
}
} // anonymous namespace
struct structured_index_add_cpu_out_functional final : public at::native::structured_index_add_cpu_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
          namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
      return *outputs_[output_idx];
    }
    std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
};
at::Tensor wrapper_CPU_index_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
structured_index_add_cpu_out_functional op;
auto precompute = op.meta(self, dim, index, source, alpha);
(void)precompute;
op.impl(self, precompute.dim, index, source, alpha, *op.outputs_[0]);
return std::move(op.outputs_[0]).take();
}
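// index_add is a structured kernel with a precomputed meta result: op.meta() returns a struct
// whose canonicalized `dim` field is what impl() consumes instead of the raw argument. The
// (void)precompute cast keeps the generated code warning-free even for ops that ignore every
// precomputed field; here precompute.dim is read on the following line.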
12888struct structured_index_add_cpu_out_out final : public at::native::structured_index_add_cpu_out {
12889 structured_index_add_cpu_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
12890 void set_output_strided(
12891 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12892 TensorOptions options, DimnameList names
12893 ) override {
12894 const auto& out = outputs_[output_idx].get();
12895 resize_out(out, sizes, strides, options);
12896 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
12897 if (C10_UNLIKELY(maybe_proxy.has_value())) {
12898 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
12899 }
12900 if (!names.empty()) {
12901 namedinference::propagate_names(outputs_[output_idx], names);
12902 }
12903 // super must happen after, so that downstream can use maybe_get_output
12904 // to retrieve the output
12905 }
12906 void set_output_raw_strided(
12907 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12908 TensorOptions options, DimnameList names
12909 ) override {
12910 const auto& out = outputs_[output_idx].get();
12911 resize_out(out, sizes, strides, options);
12912 if (!names.empty()) {
12913 namedinference::propagate_names(outputs_[output_idx], names);
12914 }
12915 // super must happen after, so that downstream can use maybe_get_output
12916 // to retrieve the output
12917 }
12918 const Tensor& maybe_get_output(int64_t output_idx) override {
12919 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
12920 }
12921 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
12922 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
12923};
12924at::Tensor & wrapper_CPU_index_add_out_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, at::Tensor & out) {
12925structured_index_add_cpu_out_out op(out);
12926auto precompute = op.meta(self, dim, index, source, alpha);
12927(void)precompute;
12928op.impl(self, precompute.dim, index, source, alpha, op.maybe_get_output(0));
12929if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
12930return out;
12931}
12932struct structured_index_add_cpu_out_inplace final : public at::native::structured_index_add_cpu_out {
12933 structured_index_add_cpu_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
12934 void set_output_strided(
12935 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12936 TensorOptions options, DimnameList names
12937 ) override {
12938 const auto& out = outputs_[output_idx].get();
12939 check_inplace(out, sizes, options);
12940 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
12941 if (C10_UNLIKELY(maybe_proxy.has_value())) {
12942 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
12943 }
12944 if (!names.empty()) {
12945 namedinference::propagate_names(outputs_[output_idx], names);
12946 }
12947 // super must happen after, so that downstream can use maybe_get_output
12948 // to retrieve the output
12949 }
12950 void set_output_raw_strided(
12951 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12952 TensorOptions options, DimnameList names
12953 ) override {
12954 const auto& out = outputs_[output_idx].get();
12955 check_inplace(out, sizes, options);
12956 if (!names.empty()) {
12957 namedinference::propagate_names(outputs_[output_idx], names);
12958 }
12959 // super must happen after, so that downstream can use maybe_get_output
12960 // to retrieve the output
12961 }
12962 const Tensor& maybe_get_output(int64_t output_idx) override {
12963 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
12964 }
12965 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
12966 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
12967};
12968at::Tensor & wrapper_CPU_index_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
12969structured_index_add_cpu_out_inplace op(self);
12970auto precompute = op.meta(self, dim, index, source, alpha);
12971(void)precompute;
12972op.impl(self, precompute.dim, index, source, alpha, op.outputs_[0]);
12973if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
12974return self;
12975}
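// The inplace variant differs from the out= variant only in that it calls
// check_inplace instead of resize_out: an in-place target must already have the
// sizes and options the meta function computed, because the wrapper is not
// allowed to reallocate `self`. Any proxy output created for a `self` whose
// strides the kernel cannot use directly is copied back the same way as above
// before `self` is returned.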
12976struct structured_index_reduce_cpu_out_functional final : public at::native::structured_index_reduce_cpu_out {
12977 void set_output_strided(
12978 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12979 TensorOptions options, DimnameList names
12980 ) override {
12981 outputs_[output_idx] = create_out(sizes, strides, options);
12982 if (!names.empty()) {
12983 namedinference::propagate_names(*outputs_[output_idx], names);
12984 }
12985 // super must happen after, so that downstream can use maybe_get_output
12986 // to retrieve the output
12987 }
12988 void set_output_raw_strided(
12989 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
12990 TensorOptions options, DimnameList names
12991 ) override {
12992 outputs_[output_idx] = create_out(sizes, strides, options);
12993 if (!names.empty()) {
12994 namedinference::propagate_names(*outputs_[output_idx], names);
12995 }
12996 // super must happen after, so that downstream can use maybe_get_output
12997 // to retrieve the output
12998 }
12999 const Tensor& maybe_get_output(int64_t output_idx) override {
13000 return *outputs_[output_idx];
13001 }
13002 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
13003};
13004at::Tensor wrapper_CPU_index_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) {
13005structured_index_reduce_cpu_out_functional op;
13006auto precompute = op.meta(self, dim, index, source, reduce, include_self);
13007(void)precompute;
13008op.impl(self, precompute.dim, index, source, reduce, include_self, *op.outputs_[0]);
13009return std::move(op.outputs_[0]).take();
13010}
13011struct structured_index_reduce_cpu_out_out final : public at::native::structured_index_reduce_cpu_out {
13012 structured_index_reduce_cpu_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
13013 void set_output_strided(
13014 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13015 TensorOptions options, DimnameList names
13016 ) override {
13017 const auto& out = outputs_[output_idx].get();
13018 resize_out(out, sizes, strides, options);
13019 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
13020 if (C10_UNLIKELY(maybe_proxy.has_value())) {
13021 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
13022 }
13023 if (!names.empty()) {
13024 namedinference::propagate_names(outputs_[output_idx], names);
13025 }
13026 // super must happen after, so that downstream can use maybe_get_output
13027 // to retrieve the output
13028 }
13029 void set_output_raw_strided(
13030 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13031 TensorOptions options, DimnameList names
13032 ) override {
13033 const auto& out = outputs_[output_idx].get();
13034 resize_out(out, sizes, strides, options);
13035 if (!names.empty()) {
13036 namedinference::propagate_names(outputs_[output_idx], names);
13037 }
13038 // super must happen after, so that downstream can use maybe_get_output
13039 // to retrieve the output
13040 }
13041 const Tensor& maybe_get_output(int64_t output_idx) override {
13042 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
13043 }
13044 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
13045 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
13046};
13047at::Tensor & wrapper_CPU_index_reduce_out_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, at::Tensor & out) {
13048structured_index_reduce_cpu_out_out op(out);
13049auto precompute = op.meta(self, dim, index, source, reduce, include_self);
13050(void)precompute;
13051op.impl(self, precompute.dim, index, source, reduce, include_self, op.maybe_get_output(0));
13052if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
13053return out;
13054}
13055struct structured_index_reduce_cpu_out_inplace final : public at::native::structured_index_reduce_cpu_out {
13056 structured_index_reduce_cpu_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
13057 void set_output_strided(
13058 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13059 TensorOptions options, DimnameList names
13060 ) override {
13061 const auto& out = outputs_[output_idx].get();
13062 check_inplace(out, sizes, options);
13063 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
13064 if (C10_UNLIKELY(maybe_proxy.has_value())) {
13065 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
13066 }
13067 if (!names.empty()) {
13068 namedinference::propagate_names(outputs_[output_idx], names);
13069 }
13070 // super must happen after, so that downstream can use maybe_get_output
13071 // to retrieve the output
13072 }
13073 void set_output_raw_strided(
13074 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13075 TensorOptions options, DimnameList names
13076 ) override {
13077 const auto& out = outputs_[output_idx].get();
13078 check_inplace(out, sizes, options);
13079 if (!names.empty()) {
13080 namedinference::propagate_names(outputs_[output_idx], names);
13081 }
13082 // super must happen after, so that downstream can use maybe_get_output
13083 // to retrieve the output
13084 }
13085 const Tensor& maybe_get_output(int64_t output_idx) override {
13086 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
13087 }
13088 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
13089 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
13090};
13091at::Tensor & wrapper_CPU_index_reduce_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) {
13092structured_index_reduce_cpu_out_inplace op(self);
13093auto precompute = op.meta(self, dim, index, source, reduce, include_self);
13094(void)precompute;
13095op.impl(self, precompute.dim, index, source, reduce, include_self, op.outputs_[0]);
13096if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
13097return self;
13098}
13099namespace {
13100at::Tensor & wrapper_CPU_int_Scalar_index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
13101 // No device check
13102 // DeviceGuard omitted
13103 return at::native::index_fill_(self, dim, index, value);
13104}
13105} // anonymous namespace
13106namespace {
13107at::Tensor & wrapper_CPU_int_Tensor_index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) {
13108 // No device check
13109 // DeviceGuard omitted
13110 return at::native::index_fill_(self, dim, index, value);
13111}
13112} // anonymous namespace
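// index_fill_ is not a structured kernel, so its two CPU overloads are emitted
// as plain pass-through wrappers in an anonymous namespace: they forward
// straight to at::native::index_fill_ and, as the generated comments note, skip
// both the multi-device check and the DeviceGuard that other wrappers set up.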
13113struct structured_scatter_src_out_functional final : public at::native::structured_scatter_src_out {
13114 void set_output_strided(
13115 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13116 TensorOptions options, DimnameList names
13117 ) override {
13118 outputs_[output_idx] = create_out(sizes, strides, options);
13119 if (!names.empty()) {
13120 namedinference::propagate_names(*outputs_[output_idx], names);
13121 }
13122 // super must happen after, so that downstream can use maybe_get_output
13123 // to retrieve the output
13124 }
13125 void set_output_raw_strided(
13126 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13127 TensorOptions options, DimnameList names
13128 ) override {
13129 outputs_[output_idx] = create_out(sizes, strides, options);
13130 if (!names.empty()) {
13131 namedinference::propagate_names(*outputs_[output_idx], names);
13132 }
13133 // super must happen after, so that downstream can use maybe_get_output
13134 // to retrieve the output
13135 }
13136 const Tensor& maybe_get_output(int64_t output_idx) override {
13137 return *outputs_[output_idx];
13138 }
13139 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
13140};
13141at::Tensor wrapper_CPU_scatter_src(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
13142structured_scatter_src_out_functional op;
13143op.meta(self, dim, index, src);
13144op.impl(self, dim, index, src, *op.outputs_[0]);
13145return std::move(op.outputs_[0]).take();
13146}
13147struct structured_scatter_src_out_out final : public at::native::structured_scatter_src_out {
13148 structured_scatter_src_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
13149 void set_output_strided(
13150 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13151 TensorOptions options, DimnameList names
13152 ) override {
13153 const auto& out = outputs_[output_idx].get();
13154 resize_out(out, sizes, strides, options);
13155 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
13156 if (C10_UNLIKELY(maybe_proxy.has_value())) {
13157 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
13158 }
13159 if (!names.empty()) {
13160 namedinference::propagate_names(outputs_[output_idx], names);
13161 }
13162 // super must happen after, so that downstream can use maybe_get_output
13163 // to retrieve the output
13164 }
13165 void set_output_raw_strided(
13166 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13167 TensorOptions options, DimnameList names
13168 ) override {
13169 const auto& out = outputs_[output_idx].get();
13170 resize_out(out, sizes, strides, options);
13171 if (!names.empty()) {
13172 namedinference::propagate_names(outputs_[output_idx], names);
13173 }
13174 // super must happen after, so that downstream can use maybe_get_output
13175 // to retrieve the output
13176 }
13177 const Tensor& maybe_get_output(int64_t output_idx) override {
13178 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
13179 }
13180 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
13181 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
13182};
13183at::Tensor & wrapper_CPU_scatter_out_src_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out) {
13184structured_scatter_src_out_out op(out);
13185op.meta(self, dim, index, src);
13186op.impl(self, dim, index, src, op.maybe_get_output(0));
13187if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
13188return out;
13189}
13190struct structured_scatter_src_out_inplace final : public at::native::structured_scatter_src_out {
13191 structured_scatter_src_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
13192 void set_output_strided(
13193 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13194 TensorOptions options, DimnameList names
13195 ) override {
13196 const auto& out = outputs_[output_idx].get();
13197 check_inplace(out, sizes, options);
13198 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
13199 if (C10_UNLIKELY(maybe_proxy.has_value())) {
13200 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
13201 }
13202 if (!names.empty()) {
13203 namedinference::propagate_names(outputs_[output_idx], names);
13204 }
13205 // super must happen after, so that downstream can use maybe_get_output
13206 // to retrieve the output
13207 }
13208 void set_output_raw_strided(
13209 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13210 TensorOptions options, DimnameList names
13211 ) override {
13212 const auto& out = outputs_[output_idx].get();
13213 check_inplace(out, sizes, options);
13214 if (!names.empty()) {
13215 namedinference::propagate_names(outputs_[output_idx], names);
13216 }
13217 // super must happen after, so that downstream can use maybe_get_output
13218 // to retrieve the output
13219 }
13220 const Tensor& maybe_get_output(int64_t output_idx) override {
13221 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
13222 }
13223 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
13224 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
13225};
13226at::Tensor & wrapper_CPU_scatter__src(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
13227structured_scatter_src_out_inplace op(self);
13228op.meta(self, dim, index, src);
13229op.impl(self, dim, index, src, op.outputs_[0]);
13230if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
13231return self;
13232}
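// The same functional / out / inplace triple is stamped out for each scatter
// overload registered for CPU (scatter.src above, and scatter.value,
// scatter.reduce, scatter.value_reduce, scatter_add and scatter_reduce.two
// below); the structs differ only in the base meta class they derive from and
// in the argument lists forwarded to meta()/impl(). Illustrative sketch only,
// reusing the {3, 4} CPU `self` from the index_add example above:
//
//   at::Tensor idx = at::zeros({2, 4}, at::kLong);
//   at::Tensor updates = at::ones({2, 4});
//   at::Tensor scattered = at::scatter(self, /*dim=*/0, idx, updates);  // -> wrapper_CPU_scatter_src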
13233struct structured_scatter_value_out_functional final : public at::native::structured_scatter_value_out {
13234 void set_output_strided(
13235 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13236 TensorOptions options, DimnameList names
13237 ) override {
13238 outputs_[output_idx] = create_out(sizes, strides, options);
13239 if (!names.empty()) {
13240 namedinference::propagate_names(*outputs_[output_idx], names);
13241 }
13242 // super must happen after, so that downstream can use maybe_get_output
13243 // to retrieve the output
13244 }
13245 void set_output_raw_strided(
13246 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13247 TensorOptions options, DimnameList names
13248 ) override {
13249 outputs_[output_idx] = create_out(sizes, strides, options);
13250 if (!names.empty()) {
13251 namedinference::propagate_names(*outputs_[output_idx], names);
13252 }
13253 // super must happen after, so that downstream can use maybe_get_output
13254 // to retrieve the output
13255 }
13256 const Tensor& maybe_get_output(int64_t output_idx) override {
13257 return *outputs_[output_idx];
13258 }
13259 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
13260};
13261at::Tensor wrapper_CPU_scatter_value(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
13262structured_scatter_value_out_functional op;
13263op.meta(self, dim, index, value);
13264op.impl(self, dim, index, value, *op.outputs_[0]);
13265return std::move(op.outputs_[0]).take();
13266}
13267struct structured_scatter_value_out_out final : public at::native::structured_scatter_value_out {
13268 structured_scatter_value_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
13269 void set_output_strided(
13270 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13271 TensorOptions options, DimnameList names
13272 ) override {
13273 const auto& out = outputs_[output_idx].get();
13274 resize_out(out, sizes, strides, options);
13275 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
13276 if (C10_UNLIKELY(maybe_proxy.has_value())) {
13277 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
13278 }
13279 if (!names.empty()) {
13280 namedinference::propagate_names(outputs_[output_idx], names);
13281 }
13282 // super must happen after, so that downstream can use maybe_get_output
13283 // to retrieve the output
13284 }
13285 void set_output_raw_strided(
13286 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13287 TensorOptions options, DimnameList names
13288 ) override {
13289 const auto& out = outputs_[output_idx].get();
13290 resize_out(out, sizes, strides, options);
13291 if (!names.empty()) {
13292 namedinference::propagate_names(outputs_[output_idx], names);
13293 }
13294 // super must happen after, so that downstream can use maybe_get_output
13295 // to retrieve the output
13296 }
13297 const Tensor& maybe_get_output(int64_t output_idx) override {
13298 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
13299 }
13300 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
13301 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
13302};
13303at::Tensor & wrapper_CPU_scatter_out_value_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out) {
13304structured_scatter_value_out_out op(out);
13305op.meta(self, dim, index, value);
13306op.impl(self, dim, index, value, op.maybe_get_output(0));
13307if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
13308return out;
13309}
13310struct structured_scatter_value_out_inplace final : public at::native::structured_scatter_value_out {
13311 structured_scatter_value_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
13312 void set_output_strided(
13313 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13314 TensorOptions options, DimnameList names
13315 ) override {
13316 const auto& out = outputs_[output_idx].get();
13317 check_inplace(out, sizes, options);
13318 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
13319 if (C10_UNLIKELY(maybe_proxy.has_value())) {
13320 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
13321 }
13322 if (!names.empty()) {
13323 namedinference::propagate_names(outputs_[output_idx], names);
13324 }
13325 // super must happen after, so that downstream can use maybe_get_output
13326 // to retrieve the output
13327 }
13328 void set_output_raw_strided(
13329 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13330 TensorOptions options, DimnameList names
13331 ) override {
13332 const auto& out = outputs_[output_idx].get();
13333 check_inplace(out, sizes, options);
13334 if (!names.empty()) {
13335 namedinference::propagate_names(outputs_[output_idx], names);
13336 }
13337 // super must happen after, so that downstream can use maybe_get_output
13338 // to retrieve the output
13339 }
13340 const Tensor& maybe_get_output(int64_t output_idx) override {
13341 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
13342 }
13343 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
13344 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
13345};
13346at::Tensor & wrapper_CPU_scatter__value(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
13347structured_scatter_value_out_inplace op(self);
13348op.meta(self, dim, index, value);
13349op.impl(self, dim, index, value, op.outputs_[0]);
13350if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
13351return self;
13352}
13353struct structured_scatter_reduce_out_functional final : public at::native::structured_scatter_reduce_out {
13354 void set_output_strided(
13355 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13356 TensorOptions options, DimnameList names
13357 ) override {
13358 outputs_[output_idx] = create_out(sizes, strides, options);
13359 if (!names.empty()) {
13360 namedinference::propagate_names(*outputs_[output_idx], names);
13361 }
13362 // super must happen after, so that downstream can use maybe_get_output
13363 // to retrieve the output
13364 }
13365 void set_output_raw_strided(
13366 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13367 TensorOptions options, DimnameList names
13368 ) override {
13369 outputs_[output_idx] = create_out(sizes, strides, options);
13370 if (!names.empty()) {
13371 namedinference::propagate_names(*outputs_[output_idx], names);
13372 }
13373 // super must happen after, so that downstream can use maybe_get_output
13374 // to retrieve the output
13375 }
13376 const Tensor& maybe_get_output(int64_t output_idx) override {
13377 return *outputs_[output_idx];
13378 }
13379 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
13380};
13381at::Tensor wrapper_CPU_scatter_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
13382structured_scatter_reduce_out_functional op;
13383op.meta(self, dim, index, src, reduce);
13384op.impl(self, dim, index, src, reduce, *op.outputs_[0]);
13385return std::move(op.outputs_[0]).take();
13386}
13387struct structured_scatter_reduce_out_out final : public at::native::structured_scatter_reduce_out {
13388 structured_scatter_reduce_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
13389 void set_output_strided(
13390 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13391 TensorOptions options, DimnameList names
13392 ) override {
13393 const auto& out = outputs_[output_idx].get();
13394 resize_out(out, sizes, strides, options);
13395 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
13396 if (C10_UNLIKELY(maybe_proxy.has_value())) {
13397 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
13398 }
13399 if (!names.empty()) {
13400 namedinference::propagate_names(outputs_[output_idx], names);
13401 }
13402 // super must happen after, so that downstream can use maybe_get_output
13403 // to retrieve the output
13404 }
13405 void set_output_raw_strided(
13406 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13407 TensorOptions options, DimnameList names
13408 ) override {
13409 const auto& out = outputs_[output_idx].get();
13410 resize_out(out, sizes, strides, options);
13411 if (!names.empty()) {
13412 namedinference::propagate_names(outputs_[output_idx], names);
13413 }
13414 // super must happen after, so that downstream can use maybe_get_output
13415 // to retrieve the output
13416 }
13417 const Tensor& maybe_get_output(int64_t output_idx) override {
13418 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
13419 }
13420 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
13421 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
13422};
13423at::Tensor & wrapper_CPU_scatter_out_reduce_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, at::Tensor & out) {
13424structured_scatter_reduce_out_out op(out);
13425op.meta(self, dim, index, src, reduce);
13426op.impl(self, dim, index, src, reduce, op.maybe_get_output(0));
13427if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
13428return out;
13429}
13430struct structured_scatter_reduce_out_inplace final : public at::native::structured_scatter_reduce_out {
13431 structured_scatter_reduce_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
13432 void set_output_strided(
13433 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13434 TensorOptions options, DimnameList names
13435 ) override {
13436 const auto& out = outputs_[output_idx].get();
13437 check_inplace(out, sizes, options);
13438 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
13439 if (C10_UNLIKELY(maybe_proxy.has_value())) {
13440 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
13441 }
13442 if (!names.empty()) {
13443 namedinference::propagate_names(outputs_[output_idx], names);
13444 }
13445 // super must happen after, so that downstream can use maybe_get_output
13446 // to retrieve the output
13447 }
13448 void set_output_raw_strided(
13449 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13450 TensorOptions options, DimnameList names
13451 ) override {
13452 const auto& out = outputs_[output_idx].get();
13453 check_inplace(out, sizes, options);
13454 if (!names.empty()) {
13455 namedinference::propagate_names(outputs_[output_idx], names);
13456 }
13457 // super must happen after, so that downstream can use maybe_get_output
13458 // to retrieve the output
13459 }
13460 const Tensor& maybe_get_output(int64_t output_idx) override {
13461 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
13462 }
13463 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
13464 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
13465};
13466at::Tensor & wrapper_CPU_scatter__reduce(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
13467structured_scatter_reduce_out_inplace op(self);
13468op.meta(self, dim, index, src, reduce);
13469op.impl(self, dim, index, src, reduce, op.outputs_[0]);
13470if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
13471return self;
13472}
13473struct structured_scatter_value_reduce_out_functional final : public at::native::structured_scatter_value_reduce_out {
13474 void set_output_strided(
13475 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13476 TensorOptions options, DimnameList names
13477 ) override {
13478 outputs_[output_idx] = create_out(sizes, strides, options);
13479 if (!names.empty()) {
13480 namedinference::propagate_names(*outputs_[output_idx], names);
13481 }
13482 // super must happen after, so that downstream can use maybe_get_output
13483 // to retrieve the output
13484 }
13485 void set_output_raw_strided(
13486 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13487 TensorOptions options, DimnameList names
13488 ) override {
13489 outputs_[output_idx] = create_out(sizes, strides, options);
13490 if (!names.empty()) {
13491 namedinference::propagate_names(*outputs_[output_idx], names);
13492 }
13493 // super must happen after, so that downstream can use maybe_get_output
13494 // to retrieve the output
13495 }
13496 const Tensor& maybe_get_output(int64_t output_idx) override {
13497 return *outputs_[output_idx];
13498 }
13499 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
13500};
13501at::Tensor wrapper_CPU_scatter_value_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
13502structured_scatter_value_reduce_out_functional op;
13503op.meta(self, dim, index, value, reduce);
13504op.impl(self, dim, index, value, reduce, *op.outputs_[0]);
13505return std::move(op.outputs_[0]).take();
13506}
13507struct structured_scatter_value_reduce_out_out final : public at::native::structured_scatter_value_reduce_out {
13508 structured_scatter_value_reduce_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
13509 void set_output_strided(
13510 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13511 TensorOptions options, DimnameList names
13512 ) override {
13513 const auto& out = outputs_[output_idx].get();
13514 resize_out(out, sizes, strides, options);
13515 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
13516 if (C10_UNLIKELY(maybe_proxy.has_value())) {
13517 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
13518 }
13519 if (!names.empty()) {
13520 namedinference::propagate_names(outputs_[output_idx], names);
13521 }
13522 // super must happen after, so that downstream can use maybe_get_output
13523 // to retrieve the output
13524 }
13525 void set_output_raw_strided(
13526 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13527 TensorOptions options, DimnameList names
13528 ) override {
13529 const auto& out = outputs_[output_idx].get();
13530 resize_out(out, sizes, strides, options);
13531 if (!names.empty()) {
13532 namedinference::propagate_names(outputs_[output_idx], names);
13533 }
13534 // super must happen after, so that downstream can use maybe_get_output
13535 // to retrieve the output
13536 }
13537 const Tensor& maybe_get_output(int64_t output_idx) override {
13538 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
13539 }
13540 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
13541 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
13542};
13543at::Tensor & wrapper_CPU_scatter_out_value_reduce_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce, at::Tensor & out) {
13544structured_scatter_value_reduce_out_out op(out);
13545op.meta(self, dim, index, value, reduce);
13546op.impl(self, dim, index, value, reduce, op.maybe_get_output(0));
13547if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
13548return out;
13549}
13550struct structured_scatter_value_reduce_out_inplace final : public at::native::structured_scatter_value_reduce_out {
13551 structured_scatter_value_reduce_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
13552 void set_output_strided(
13553 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13554 TensorOptions options, DimnameList names
13555 ) override {
13556 const auto& out = outputs_[output_idx].get();
13557 check_inplace(out, sizes, options);
13558 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
13559 if (C10_UNLIKELY(maybe_proxy.has_value())) {
13560 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
13561 }
13562 if (!names.empty()) {
13563 namedinference::propagate_names(outputs_[output_idx], names);
13564 }
13565 // super must happen after, so that downstream can use maybe_get_output
13566 // to retrieve the output
13567 }
13568 void set_output_raw_strided(
13569 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13570 TensorOptions options, DimnameList names
13571 ) override {
13572 const auto& out = outputs_[output_idx].get();
13573 check_inplace(out, sizes, options);
13574 if (!names.empty()) {
13575 namedinference::propagate_names(outputs_[output_idx], names);
13576 }
13577 // super must happen after, so that downstream can use maybe_get_output
13578 // to retrieve the output
13579 }
13580 const Tensor& maybe_get_output(int64_t output_idx) override {
13581 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
13582 }
13583 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
13584 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
13585};
13586at::Tensor & wrapper_CPU_scatter__value_reduce(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
13587structured_scatter_value_reduce_out_inplace op(self);
13588op.meta(self, dim, index, value, reduce);
13589op.impl(self, dim, index, value, reduce, op.outputs_[0]);
13590if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
13591return self;
13592}
13593struct structured_scatter_add_functional final : public at::native::structured_scatter_add {
13594 void set_output_strided(
13595 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13596 TensorOptions options, DimnameList names
13597 ) override {
13598 outputs_[output_idx] = create_out(sizes, strides, options);
13599 if (!names.empty()) {
13600 namedinference::propagate_names(*outputs_[output_idx], names);
13601 }
13602 // super must happen after, so that downstream can use maybe_get_output
13603 // to retrieve the output
13604 }
13605 void set_output_raw_strided(
13606 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13607 TensorOptions options, DimnameList names
13608 ) override {
13609 outputs_[output_idx] = create_out(sizes, strides, options);
13610 if (!names.empty()) {
13611 namedinference::propagate_names(*outputs_[output_idx], names);
13612 }
13613 // super must happen after, so that downstream can use maybe_get_output
13614 // to retrieve the output
13615 }
13616 const Tensor& maybe_get_output(int64_t output_idx) override {
13617 return *outputs_[output_idx];
13618 }
13619 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
13620};
13621at::Tensor wrapper_CPU_scatter_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
13622structured_scatter_add_functional op;
13623op.meta(self, dim, index, src);
13624op.impl(self, dim, index, src, *op.outputs_[0]);
13625return std::move(op.outputs_[0]).take();
13626}
13627struct structured_scatter_add_out final : public at::native::structured_scatter_add {
13628 structured_scatter_add_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
13629 void set_output_strided(
13630 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13631 TensorOptions options, DimnameList names
13632 ) override {
13633 const auto& out = outputs_[output_idx].get();
13634 resize_out(out, sizes, strides, options);
13635 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
13636 if (C10_UNLIKELY(maybe_proxy.has_value())) {
13637 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
13638 }
13639 if (!names.empty()) {
13640 namedinference::propagate_names(outputs_[output_idx], names);
13641 }
13642 // super must happen after, so that downstream can use maybe_get_output
13643 // to retrieve the output
13644 }
13645 void set_output_raw_strided(
13646 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13647 TensorOptions options, DimnameList names
13648 ) override {
13649 const auto& out = outputs_[output_idx].get();
13650 resize_out(out, sizes, strides, options);
13651 if (!names.empty()) {
13652 namedinference::propagate_names(outputs_[output_idx], names);
13653 }
13654 // super must happen after, so that downstream can use maybe_get_output
13655 // to retrieve the output
13656 }
13657 const Tensor& maybe_get_output(int64_t output_idx) override {
13658 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
13659 }
13660 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
13661 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
13662};
13663at::Tensor & wrapper_CPU_scatter_add_out_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out) {
13664structured_scatter_add_out op(out);
13665op.meta(self, dim, index, src);
13666op.impl(self, dim, index, src, op.maybe_get_output(0));
13667if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
13668return out;
13669}
13670struct structured_scatter_add_inplace final : public at::native::structured_scatter_add {
13671 structured_scatter_add_inplace(Tensor& self) : outputs_{std::ref(self)} {}
13672 void set_output_strided(
13673 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13674 TensorOptions options, DimnameList names
13675 ) override {
13676 const auto& out = outputs_[output_idx].get();
13677 check_inplace(out, sizes, options);
13678 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
13679 if (C10_UNLIKELY(maybe_proxy.has_value())) {
13680 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
13681 }
13682 if (!names.empty()) {
13683 namedinference::propagate_names(outputs_[output_idx], names);
13684 }
13685 // super must happen after, so that downstream can use maybe_get_output
13686 // to retrieve the output
13687 }
13688 void set_output_raw_strided(
13689 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13690 TensorOptions options, DimnameList names
13691 ) override {
13692 const auto& out = outputs_[output_idx].get();
13693 check_inplace(out, sizes, options);
13694 if (!names.empty()) {
13695 namedinference::propagate_names(outputs_[output_idx], names);
13696 }
13697 // super must happen after, so that downstream can use maybe_get_output
13698 // to retrieve the output
13699 }
13700 const Tensor& maybe_get_output(int64_t output_idx) override {
13701 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
13702 }
13703 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
13704 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
13705};
13706at::Tensor & wrapper_CPU_scatter_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
13707structured_scatter_add_inplace op(self);
13708op.meta(self, dim, index, src);
13709op.impl(self, dim, index, src, op.outputs_[0]);
13710if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
13711return self;
13712}
13713struct structured_scatter_reduce_two_functional final : public at::native::structured_scatter_reduce_two {
13714 void set_output_strided(
13715 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13716 TensorOptions options, DimnameList names
13717 ) override {
13718 outputs_[output_idx] = create_out(sizes, strides, options);
13719 if (!names.empty()) {
13720 namedinference::propagate_names(*outputs_[output_idx], names);
13721 }
13722 // super must happen after, so that downstream can use maybe_get_output
13723 // to retrieve the output
13724 }
13725 void set_output_raw_strided(
13726 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13727 TensorOptions options, DimnameList names
13728 ) override {
13729 outputs_[output_idx] = create_out(sizes, strides, options);
13730 if (!names.empty()) {
13731 namedinference::propagate_names(*outputs_[output_idx], names);
13732 }
13733 // super must happen after, so that downstream can use maybe_get_output
13734 // to retrieve the output
13735 }
13736 const Tensor& maybe_get_output(int64_t output_idx) override {
13737 return *outputs_[output_idx];
13738 }
13739 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
13740};
13741at::Tensor wrapper_CPU_scatter_reduce_two(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
13742structured_scatter_reduce_two_functional op;
13743op.meta(self, dim, index, src, reduce, include_self);
13744op.impl(self, dim, index, src, reduce, include_self, *op.outputs_[0]);
13745return std::move(op.outputs_[0]).take();
13746}
13747struct structured_scatter_reduce_two_out final : public at::native::structured_scatter_reduce_two {
13748 structured_scatter_reduce_two_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
13749 void set_output_strided(
13750 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13751 TensorOptions options, DimnameList names
13752 ) override {
13753 const auto& out = outputs_[output_idx].get();
13754 resize_out(out, sizes, strides, options);
13755 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
13756 if (C10_UNLIKELY(maybe_proxy.has_value())) {
13757 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
13758 }
13759 if (!names.empty()) {
13760 namedinference::propagate_names(outputs_[output_idx], names);
13761 }
13762 // super must happen after, so that downstream can use maybe_get_output
13763 // to retrieve the output
13764 }
13765 void set_output_raw_strided(
13766 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13767 TensorOptions options, DimnameList names
13768 ) override {
13769 const auto& out = outputs_[output_idx].get();
13770 resize_out(out, sizes, strides, options);
13771 if (!names.empty()) {
13772 namedinference::propagate_names(outputs_[output_idx], names);
13773 }
13774 // super must happen after, so that downstream can use maybe_get_output
13775 // to retrieve the output
13776 }
13777 const Tensor& maybe_get_output(int64_t output_idx) override {
13778 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
13779 }
13780 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
13781 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
13782};
13783at::Tensor & wrapper_CPU_scatter_reduce_out_two_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self, at::Tensor & out) {
13784structured_scatter_reduce_two_out op(out);
13785op.meta(self, dim, index, src, reduce, include_self);
13786op.impl(self, dim, index, src, reduce, include_self, op.maybe_get_output(0));
13787if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
13788return out;
13789}
13790struct structured_scatter_reduce_two_inplace final : public at::native::structured_scatter_reduce_two {
13791 structured_scatter_reduce_two_inplace(Tensor& self) : outputs_{std::ref(self)} {}
13792 void set_output_strided(
13793 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13794 TensorOptions options, DimnameList names
13795 ) override {
13796 const auto& out = outputs_[output_idx].get();
13797 check_inplace(out, sizes, options);
13798 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
13799 if (C10_UNLIKELY(maybe_proxy.has_value())) {
13800 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
13801 }
13802 if (!names.empty()) {
13803 namedinference::propagate_names(outputs_[output_idx], names);
13804 }
13805 // super must happen after, so that downstream can use maybe_get_output
13806 // to retrieve the output
13807 }
13808 void set_output_raw_strided(
13809 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13810 TensorOptions options, DimnameList names
13811 ) override {
13812 const auto& out = outputs_[output_idx].get();
13813 check_inplace(out, sizes, options);
13814 if (!names.empty()) {
13815 namedinference::propagate_names(outputs_[output_idx], names);
13816 }
13817 // super must happen after, so that downstream can use maybe_get_output
13818 // to retrieve the output
13819 }
13820 const Tensor& maybe_get_output(int64_t output_idx) override {
13821 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
13822 }
13823 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
13824 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
13825};
13826at::Tensor & wrapper_CPU_scatter_reduce__two(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
13827structured_scatter_reduce_two_inplace op(self);
13828op.meta(self, dim, index, src, reduce, include_self);
13829op.impl(self, dim, index, src, reduce, include_self, op.outputs_[0]);
13830if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
13831return self;
13832}
13833struct structured_eq_Scalar_out_functional final : public at::native::structured_eq_Scalar_out {
13834 void set_output_strided(
13835 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13836 TensorOptions options, DimnameList names
13837 ) override {
13838 outputs_[output_idx] = create_out(sizes, strides, options);
13839 if (!names.empty()) {
13840 namedinference::propagate_names(*outputs_[output_idx], names);
13841 }
13842 // super must happen after, so that downstream can use maybe_get_output
13843 // to retrieve the output
13844 at::native::structured_eq_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
13845 }
13846 void set_output_raw_strided(
13847 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13848 TensorOptions options, DimnameList names
13849 ) override {
13850 outputs_[output_idx] = create_out(sizes, strides, options);
13851 if (!names.empty()) {
13852 namedinference::propagate_names(*outputs_[output_idx], names);
13853 }
13854 // super must happen after, so that downstream can use maybe_get_output
13855 // to retrieve the output
13856 at::native::structured_eq_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
13857 }
13858 const Tensor& maybe_get_output(int64_t output_idx) override {
13859 return *outputs_[output_idx];
13860 }
13861 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
13862};
13863at::Tensor wrapper_CPU_eq_Scalar(const at::Tensor & self, const at::Scalar & other) {
13864structured_eq_Scalar_out_functional op;
13865op.meta(self, other);
13866op.impl(self, other, *op.outputs_[0]);
13867return std::move(op.outputs_[0]).take();
13868}
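// The eq and bitwise_and structs below also chain to their parent class's
// set_output_raw_strided (e.g.
// at::native::structured_eq_Scalar_out::set_output_raw_strided) at the end of
// their set_output_* overrides; this is the "super" call the recurring comment
// refers to. These comparison and bitwise kernels are built on TensorIterator,
// and the chained call is what hands the freshly created or resized output to
// the iterator base class; the index_* and scatter* kernels earlier in this
// file have no such hook, so their overrides end after name propagation.
//
//   // Illustrative sketch only:
//   at::Tensor mask = at::eq(self, 0);   // functional path -> wrapper_CPU_eq_Scalar
//   at::eq_out(mask, self, 0);           // out= path       -> wrapper_CPU_eq_out_Scalar_out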
13869struct structured_eq_Scalar_out_out final : public at::native::structured_eq_Scalar_out {
13870 structured_eq_Scalar_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
13871 void set_output_strided(
13872 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13873 TensorOptions options, DimnameList names
13874 ) override {
13875 const auto& out = outputs_[output_idx].get();
13876 resize_out(out, sizes, strides, options);
13877 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
13878 if (C10_UNLIKELY(maybe_proxy.has_value())) {
13879 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
13880 }
13881 if (!names.empty()) {
13882 namedinference::propagate_names(outputs_[output_idx], names);
13883 }
13884 // super must happen after, so that downstream can use maybe_get_output
13885 // to retrieve the output
13886 at::native::structured_eq_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
13887 }
13888 void set_output_raw_strided(
13889 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13890 TensorOptions options, DimnameList names
13891 ) override {
13892 const auto& out = outputs_[output_idx].get();
13893 resize_out(out, sizes, strides, options);
13894 if (!names.empty()) {
13895 namedinference::propagate_names(outputs_[output_idx], names);
13896 }
13897 // super must happen after, so that downstream can use maybe_get_output
13898 // to retrieve the output
13899 at::native::structured_eq_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
13900 }
13901 const Tensor& maybe_get_output(int64_t output_idx) override {
13902 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
13903 }
13904 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
13905 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
13906};
13907at::Tensor & wrapper_CPU_eq_out_Scalar_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
13908structured_eq_Scalar_out_out op(out);
13909op.meta(self, other);
13910op.impl(self, other, op.maybe_get_output(0));
13911if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
13912return out;
13913}
13914struct structured_eq_Scalar_out_inplace final : public at::native::structured_eq_Scalar_out {
13915 structured_eq_Scalar_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
13916 void set_output_strided(
13917 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13918 TensorOptions options, DimnameList names
13919 ) override {
13920 const auto& out = outputs_[output_idx].get();
13921 check_inplace(out, sizes, options);
13922 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
13923 if (C10_UNLIKELY(maybe_proxy.has_value())) {
13924 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
13925 }
13926 if (!names.empty()) {
13927 namedinference::propagate_names(outputs_[output_idx], names);
13928 }
13929 // super must happen after, so that downstream can use maybe_get_output
13930 // to retrieve the output
13931 at::native::structured_eq_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
13932 }
13933 void set_output_raw_strided(
13934 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13935 TensorOptions options, DimnameList names
13936 ) override {
13937 const auto& out = outputs_[output_idx].get();
13938 check_inplace(out, sizes, options);
13939 if (!names.empty()) {
13940 namedinference::propagate_names(outputs_[output_idx], names);
13941 }
13942 // super must happen after, so that downstream can use maybe_get_output
13943 // to retrieve the output
13944 at::native::structured_eq_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
13945 }
13946 const Tensor& maybe_get_output(int64_t output_idx) override {
13947 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
13948 }
13949 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
13950 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
13951};
13952at::Tensor & wrapper_CPU_eq__Scalar(at::Tensor & self, const at::Scalar & other) {
13953structured_eq_Scalar_out_inplace op(self);
13954op.meta(self, other);
13955op.impl(self, other, op.outputs_[0]);
13956if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
13957return self;
13958}
13959struct structured_eq_Tensor_out_functional final : public at::native::structured_eq_Tensor_out {
13960 void set_output_strided(
13961 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13962 TensorOptions options, DimnameList names
13963 ) override {
13964 outputs_[output_idx] = create_out(sizes, strides, options);
13965 if (!names.empty()) {
13966 namedinference::propagate_names(*outputs_[output_idx], names);
13967 }
13968 // super must happen after, so that downstream can use maybe_get_output
13969 // to retrieve the output
13970 at::native::structured_eq_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
13971 }
13972 void set_output_raw_strided(
13973 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13974 TensorOptions options, DimnameList names
13975 ) override {
13976 outputs_[output_idx] = create_out(sizes, strides, options);
13977 if (!names.empty()) {
13978 namedinference::propagate_names(*outputs_[output_idx], names);
13979 }
13980 // super must happen after, so that downstream can use maybe_get_output
13981 // to retrieve the output
13982 at::native::structured_eq_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
13983 }
13984 const Tensor& maybe_get_output(int64_t output_idx) override {
13985 return *outputs_[output_idx];
13986 }
13987 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
13988};
13989at::Tensor wrapper_CPU_eq_Tensor(const at::Tensor & self, const at::Tensor & other) {
13990structured_eq_Tensor_out_functional op;
13991op.meta(self, other);
13992op.impl(self, other, *op.outputs_[0]);
13993return std::move(op.outputs_[0]).take();
13994}
13995struct structured_eq_Tensor_out_out final : public at::native::structured_eq_Tensor_out {
13996 structured_eq_Tensor_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
13997 void set_output_strided(
13998 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
13999 TensorOptions options, DimnameList names
14000 ) override {
14001 const auto& out = outputs_[output_idx].get();
14002 resize_out(out, sizes, strides, options);
14003 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
14004 if (C10_UNLIKELY(maybe_proxy.has_value())) {
14005 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
14006 }
14007 if (!names.empty()) {
14008 namedinference::propagate_names(outputs_[output_idx], names);
14009 }
14010 // super must happen after, so that downstream can use maybe_get_output
14011 // to retrieve the output
14012 at::native::structured_eq_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14013 }
14014 void set_output_raw_strided(
14015 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14016 TensorOptions options, DimnameList names
14017 ) override {
14018 const auto& out = outputs_[output_idx].get();
14019 resize_out(out, sizes, strides, options);
14020 if (!names.empty()) {
14021 namedinference::propagate_names(outputs_[output_idx], names);
14022 }
14023 // super must happen after, so that downstream can use maybe_get_output
14024 // to retrieve the output
14025 at::native::structured_eq_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14026 }
14027 const Tensor& maybe_get_output(int64_t output_idx) override {
14028 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
14029 }
14030 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
14031 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
14032};
14033at::Tensor & wrapper_CPU_eq_out_Tensor_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
14034structured_eq_Tensor_out_out op(out);
14035op.meta(self, other);
14036op.impl(self, other, op.maybe_get_output(0));
14037if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
14038return out;
14039}
14040struct structured_eq_Tensor_out_inplace final : public at::native::structured_eq_Tensor_out {
14041 structured_eq_Tensor_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
14042 void set_output_strided(
14043 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14044 TensorOptions options, DimnameList names
14045 ) override {
14046 const auto& out = outputs_[output_idx].get();
14047 check_inplace(out, sizes, options);
14048 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
14049 if (C10_UNLIKELY(maybe_proxy.has_value())) {
14050 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
14051 }
14052 if (!names.empty()) {
14053 namedinference::propagate_names(outputs_[output_idx], names);
14054 }
14055 // super must happen after, so that downstream can use maybe_get_output
14056 // to retrieve the output
14057 at::native::structured_eq_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14058 }
14059 void set_output_raw_strided(
14060 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14061 TensorOptions options, DimnameList names
14062 ) override {
14063 const auto& out = outputs_[output_idx].get();
14064 check_inplace(out, sizes, options);
14065 if (!names.empty()) {
14066 namedinference::propagate_names(outputs_[output_idx], names);
14067 }
14068 // super must happen after, so that downstream can use maybe_get_output
14069 // to retrieve the output
14070 at::native::structured_eq_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14071 }
14072 const Tensor& maybe_get_output(int64_t output_idx) override {
14073 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
14074 }
14075 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
14076 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
14077};
14078at::Tensor & wrapper_CPU_eq__Tensor(at::Tensor & self, const at::Tensor & other) {
14079structured_eq_Tensor_out_inplace op(self);
14080op.meta(self, other);
14081op.impl(self, other, op.outputs_[0]);
14082if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
14083return self;
14084}
14085struct structured_bitwise_and_out_functional final : public at::native::structured_bitwise_and_out {
14086 void set_output_strided(
14087 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14088 TensorOptions options, DimnameList names
14089 ) override {
14090 outputs_[output_idx] = create_out(sizes, strides, options);
14091 if (!names.empty()) {
14092 namedinference::propagate_names(*outputs_[output_idx], names);
14093 }
14094 // super must happen after, so that downstream can use maybe_get_output
14095 // to retrieve the output
14096 at::native::structured_bitwise_and_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14097 }
14098 void set_output_raw_strided(
14099 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14100 TensorOptions options, DimnameList names
14101 ) override {
14102 outputs_[output_idx] = create_out(sizes, strides, options);
14103 if (!names.empty()) {
14104 namedinference::propagate_names(*outputs_[output_idx], names);
14105 }
14106 // super must happen after, so that downstream can use maybe_get_output
14107 // to retrieve the output
14108 at::native::structured_bitwise_and_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14109 }
14110 const Tensor& maybe_get_output(int64_t output_idx) override {
14111 return *outputs_[output_idx];
14112 }
14113 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
14114};
14115at::Tensor wrapper_CPU_bitwise_and_Tensor(const at::Tensor & self, const at::Tensor & other) {
14116structured_bitwise_and_out_functional op;
14117op.meta(self, other);
14118op.impl(self, other, *op.outputs_[0]);
14119return std::move(op.outputs_[0]).take();
14120}
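// NOTE: the functional wrappers keep their result in a
// c10::ExclusivelyOwned<Tensor>; std::move(...).take() above releases that
// sole ownership and hands a plain Tensor back to the caller, presumably so
// the wrapper can skip the usual reference-counting overhead while it owns
// the output.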
14121struct structured_bitwise_and_out_out final : public at::native::structured_bitwise_and_out {
14122 structured_bitwise_and_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
14123 void set_output_strided(
14124 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14125 TensorOptions options, DimnameList names
14126 ) override {
14127 const auto& out = outputs_[output_idx].get();
14128 resize_out(out, sizes, strides, options);
14129 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
14130 if (C10_UNLIKELY(maybe_proxy.has_value())) {
14131 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
14132 }
14133 if (!names.empty()) {
14134 namedinference::propagate_names(outputs_[output_idx], names);
14135 }
14136 // super must happen after, so that downstream can use maybe_get_output
14137 // to retrieve the output
14138 at::native::structured_bitwise_and_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14139 }
14140 void set_output_raw_strided(
14141 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14142 TensorOptions options, DimnameList names
14143 ) override {
14144 const auto& out = outputs_[output_idx].get();
14145 resize_out(out, sizes, strides, options);
14146 if (!names.empty()) {
14147 namedinference::propagate_names(outputs_[output_idx], names);
14148 }
14149 // super must happen after, so that downstream can use maybe_get_output
14150 // to retrieve the output
14151 at::native::structured_bitwise_and_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14152 }
14153 const Tensor& maybe_get_output(int64_t output_idx) override {
14154 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
14155 }
14156 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
14157 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
14158};
14159at::Tensor & wrapper_CPU_bitwise_and_out_Tensor_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
14160structured_bitwise_and_out_out op(out);
14161op.meta(self, other);
14162op.impl(self, other, op.maybe_get_output(0));
14163if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
14164return out;
14165}
14166struct structured_bitwise_and_out_inplace final : public at::native::structured_bitwise_and_out {
14167 structured_bitwise_and_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
14168 void set_output_strided(
14169 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14170 TensorOptions options, DimnameList names
14171 ) override {
14172 const auto& out = outputs_[output_idx].get();
14173 check_inplace(out, sizes, options);
14174 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
14175 if (C10_UNLIKELY(maybe_proxy.has_value())) {
14176 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
14177 }
14178 if (!names.empty()) {
14179 namedinference::propagate_names(outputs_[output_idx], names);
14180 }
14181 // super must happen after, so that downstream can use maybe_get_output
14182 // to retrieve the output
14183 at::native::structured_bitwise_and_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14184 }
14185 void set_output_raw_strided(
14186 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14187 TensorOptions options, DimnameList names
14188 ) override {
14189 const auto& out = outputs_[output_idx].get();
14190 check_inplace(out, sizes, options);
14191 if (!names.empty()) {
14192 namedinference::propagate_names(outputs_[output_idx], names);
14193 }
14194 // super must happen after, so that downstream can use maybe_get_output
14195 // to retrieve the output
14196 at::native::structured_bitwise_and_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14197 }
14198 const Tensor& maybe_get_output(int64_t output_idx) override {
14199 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
14200 }
14201 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
14202 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
14203};
14204at::Tensor & wrapper_CPU_bitwise_and__Tensor(at::Tensor & self, const at::Tensor & other) {
14205structured_bitwise_and_out_inplace op(self);
14206op.meta(self, other);
14207op.impl(self, other, op.outputs_[0]);
14208if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
14209return self;
14210}
14211struct structured_bitwise_or_out_functional final : public at::native::structured_bitwise_or_out {
14212 void set_output_strided(
14213 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14214 TensorOptions options, DimnameList names
14215 ) override {
14216 outputs_[output_idx] = create_out(sizes, strides, options);
14217 if (!names.empty()) {
14218 namedinference::propagate_names(*outputs_[output_idx], names);
14219 }
14220 // super must happen after, so that downstream can use maybe_get_output
14221 // to retrieve the output
14222 at::native::structured_bitwise_or_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14223 }
14224 void set_output_raw_strided(
14225 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14226 TensorOptions options, DimnameList names
14227 ) override {
14228 outputs_[output_idx] = create_out(sizes, strides, options);
14229 if (!names.empty()) {
14230 namedinference::propagate_names(*outputs_[output_idx], names);
14231 }
14232 // super must happen after, so that downstream can use maybe_get_output
14233 // to retrieve the output
14234 at::native::structured_bitwise_or_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14235 }
14236 const Tensor& maybe_get_output(int64_t output_idx) override {
14237 return *outputs_[output_idx];
14238 }
14239 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
14240};
14241at::Tensor wrapper_CPU_bitwise_or_Tensor(const at::Tensor & self, const at::Tensor & other) {
14242structured_bitwise_or_out_functional op;
14243op.meta(self, other);
14244op.impl(self, other, *op.outputs_[0]);
14245return std::move(op.outputs_[0]).take();
14246}
14247struct structured_bitwise_or_out_out final : public at::native::structured_bitwise_or_out {
14248 structured_bitwise_or_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
14249 void set_output_strided(
14250 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14251 TensorOptions options, DimnameList names
14252 ) override {
14253 const auto& out = outputs_[output_idx].get();
14254 resize_out(out, sizes, strides, options);
14255 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
14256 if (C10_UNLIKELY(maybe_proxy.has_value())) {
14257 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
14258 }
14259 if (!names.empty()) {
14260 namedinference::propagate_names(outputs_[output_idx], names);
14261 }
14262 // super must happen after, so that downstream can use maybe_get_output
14263 // to retrieve the output
14264 at::native::structured_bitwise_or_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14265 }
14266 void set_output_raw_strided(
14267 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14268 TensorOptions options, DimnameList names
14269 ) override {
14270 const auto& out = outputs_[output_idx].get();
14271 resize_out(out, sizes, strides, options);
14272 if (!names.empty()) {
14273 namedinference::propagate_names(outputs_[output_idx], names);
14274 }
14275 // super must happen after, so that downstream can use maybe_get_output
14276 // to retrieve the output
14277 at::native::structured_bitwise_or_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14278 }
14279 const Tensor& maybe_get_output(int64_t output_idx) override {
14280 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
14281 }
14282 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
14283 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
14284};
14285at::Tensor & wrapper_CPU_bitwise_or_out_Tensor_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
14286structured_bitwise_or_out_out op(out);
14287op.meta(self, other);
14288op.impl(self, other, op.maybe_get_output(0));
14289if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
14290return out;
14291}
14292struct structured_bitwise_or_out_inplace final : public at::native::structured_bitwise_or_out {
14293 structured_bitwise_or_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
14294 void set_output_strided(
14295 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14296 TensorOptions options, DimnameList names
14297 ) override {
14298 const auto& out = outputs_[output_idx].get();
14299 check_inplace(out, sizes, options);
14300 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
14301 if (C10_UNLIKELY(maybe_proxy.has_value())) {
14302 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
14303 }
14304 if (!names.empty()) {
14305 namedinference::propagate_names(outputs_[output_idx], names);
14306 }
14307 // super must happen after, so that downstream can use maybe_get_output
14308 // to retrieve the output
14309 at::native::structured_bitwise_or_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14310 }
14311 void set_output_raw_strided(
14312 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14313 TensorOptions options, DimnameList names
14314 ) override {
14315 const auto& out = outputs_[output_idx].get();
14316 check_inplace(out, sizes, options);
14317 if (!names.empty()) {
14318 namedinference::propagate_names(outputs_[output_idx], names);
14319 }
14320 // super must happen after, so that downstream can use maybe_get_output
14321 // to retrieve the output
14322 at::native::structured_bitwise_or_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14323 }
14324 const Tensor& maybe_get_output(int64_t output_idx) override {
14325 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
14326 }
14327 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
14328 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
14329};
14330at::Tensor & wrapper_CPU_bitwise_or__Tensor(at::Tensor & self, const at::Tensor & other) {
14331structured_bitwise_or_out_inplace op(self);
14332op.meta(self, other);
14333op.impl(self, other, op.outputs_[0]);
14334if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
14335return self;
14336}
14337struct structured_bitwise_xor_out_functional final : public at::native::structured_bitwise_xor_out {
14338 void set_output_strided(
14339 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14340 TensorOptions options, DimnameList names
14341 ) override {
14342 outputs_[output_idx] = create_out(sizes, strides, options);
14343 if (!names.empty()) {
14344 namedinference::propagate_names(*outputs_[output_idx], names);
14345 }
14346 // super must happen after, so that downstream can use maybe_get_output
14347 // to retrieve the output
14348 at::native::structured_bitwise_xor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14349 }
14350 void set_output_raw_strided(
14351 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14352 TensorOptions options, DimnameList names
14353 ) override {
14354 outputs_[output_idx] = create_out(sizes, strides, options);
14355 if (!names.empty()) {
14356 namedinference::propagate_names(*outputs_[output_idx], names);
14357 }
14358 // super must happen after, so that downstream can use maybe_get_output
14359 // to retrieve the output
14360 at::native::structured_bitwise_xor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14361 }
14362 const Tensor& maybe_get_output(int64_t output_idx) override {
14363 return *outputs_[output_idx];
14364 }
14365 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
14366};
14367at::Tensor wrapper_CPU_bitwise_xor_Tensor(const at::Tensor & self, const at::Tensor & other) {
14368structured_bitwise_xor_out_functional op;
14369op.meta(self, other);
14370op.impl(self, other, *op.outputs_[0]);
14371return std::move(op.outputs_[0]).take();
14372}
14373struct structured_bitwise_xor_out_out final : public at::native::structured_bitwise_xor_out {
14374 structured_bitwise_xor_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
14375 void set_output_strided(
14376 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14377 TensorOptions options, DimnameList names
14378 ) override {
14379 const auto& out = outputs_[output_idx].get();
14380 resize_out(out, sizes, strides, options);
14381 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
14382 if (C10_UNLIKELY(maybe_proxy.has_value())) {
14383 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
14384 }
14385 if (!names.empty()) {
14386 namedinference::propagate_names(outputs_[output_idx], names);
14387 }
14388 // super must happen after, so that downstream can use maybe_get_output
14389 // to retrieve the output
14390 at::native::structured_bitwise_xor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14391 }
14392 void set_output_raw_strided(
14393 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14394 TensorOptions options, DimnameList names
14395 ) override {
14396 const auto& out = outputs_[output_idx].get();
14397 resize_out(out, sizes, strides, options);
14398 if (!names.empty()) {
14399 namedinference::propagate_names(outputs_[output_idx], names);
14400 }
14401 // super must happen after, so that downstream can use maybe_get_output
14402 // to retrieve the output
14403 at::native::structured_bitwise_xor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14404 }
14405 const Tensor& maybe_get_output(int64_t output_idx) override {
14406 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
14407 }
14408 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
14409 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
14410};
14411at::Tensor & wrapper_CPU_bitwise_xor_out_Tensor_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
14412structured_bitwise_xor_out_out op(out);
14413op.meta(self, other);
14414op.impl(self, other, op.maybe_get_output(0));
14415if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
14416return out;
14417}
14418struct structured_bitwise_xor_out_inplace final : public at::native::structured_bitwise_xor_out {
14419 structured_bitwise_xor_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
14420 void set_output_strided(
14421 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14422 TensorOptions options, DimnameList names
14423 ) override {
14424 const auto& out = outputs_[output_idx].get();
14425 check_inplace(out, sizes, options);
14426 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
14427 if (C10_UNLIKELY(maybe_proxy.has_value())) {
14428 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
14429 }
14430 if (!names.empty()) {
14431 namedinference::propagate_names(outputs_[output_idx], names);
14432 }
14433 // super must happen after, so that downstream can use maybe_get_output
14434 // to retrieve the output
14435 at::native::structured_bitwise_xor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14436 }
14437 void set_output_raw_strided(
14438 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14439 TensorOptions options, DimnameList names
14440 ) override {
14441 const auto& out = outputs_[output_idx].get();
14442 check_inplace(out, sizes, options);
14443 if (!names.empty()) {
14444 namedinference::propagate_names(outputs_[output_idx], names);
14445 }
14446 // super must happen after, so that downstream can use maybe_get_output
14447 // to retrieve the output
14448 at::native::structured_bitwise_xor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14449 }
14450 const Tensor& maybe_get_output(int64_t output_idx) override {
14451 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
14452 }
14453 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
14454 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
14455};
14456at::Tensor & wrapper_CPU_bitwise_xor__Tensor(at::Tensor & self, const at::Tensor & other) {
14457structured_bitwise_xor_out_inplace op(self);
14458op.meta(self, other);
14459op.impl(self, other, op.outputs_[0]);
14460if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
14461return self;
14462}
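// NOTE: as an illustration only, a call such as at::bitwise_xor(a, b) on CPU
// tensors is expected to land in wrapper_CPU_bitwise_xor_Tensor above through
// the CPU dispatch-key registrations emitted later in this file; the wrapper
// names themselves are internal and not part of the public API.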
14463namespace {
14464at::Tensor wrapper_CPU_Scalar___lshift__(const at::Tensor & self, const at::Scalar & other) {
14465 // No device check
14466 // DeviceGuard omitted
14467 return at::native::__lshift__(self, other);
14468}
14469} // anonymous namespace
14470namespace {
14471at::Tensor & wrapper_CPU_Scalar___ilshift__(at::Tensor & self, const at::Scalar & other) {
14472 // No device check
14473 // DeviceGuard omitted
14474 return at::native::__ilshift__(self, other);
14475}
14476} // anonymous namespace
14477namespace {
14478at::Tensor wrapper_CPU_Tensor___lshift__(const at::Tensor & self, const at::Tensor & other) {
14479 // No device check
14480 // DeviceGuard omitted
14481 return at::native::__lshift__(self, other);
14482}
14483} // anonymous namespace
14484namespace {
14485at::Tensor & wrapper_CPU_Tensor___ilshift__(at::Tensor & self, const at::Tensor & other) {
14486 // No device check
14487 // DeviceGuard omitted
14488 return at::native::__ilshift__(self, other);
14489}
14490} // anonymous namespace
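// NOTE: unlike the structured wrappers, the __lshift__/__ilshift__ functions
// above (and the matching __rshift__/__irshift__ ones further down) are plain
// passthroughs: they forward straight to at::native with no output wrapper,
// no device check and no DeviceGuard, as their inline comments state.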
14491struct structured_bitwise_left_shift_out_functional final : public at::native::structured_bitwise_left_shift_out {
14492 void set_output_strided(
14493 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14494 TensorOptions options, DimnameList names
14495 ) override {
14496 outputs_[output_idx] = create_out(sizes, strides, options);
14497 if (!names.empty()) {
14498 namedinference::propagate_names(*outputs_[output_idx], names);
14499 }
14500 // super must happen after, so that downstream can use maybe_get_output
14501 // to retrieve the output
14502 at::native::structured_bitwise_left_shift_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14503 }
14504 void set_output_raw_strided(
14505 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14506 TensorOptions options, DimnameList names
14507 ) override {
14508 outputs_[output_idx] = create_out(sizes, strides, options);
14509 if (!names.empty()) {
14510 namedinference::propagate_names(*outputs_[output_idx], names);
14511 }
14512 // super must happen after, so that downstream can use maybe_get_output
14513 // to retrieve the output
14514 at::native::structured_bitwise_left_shift_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14515 }
14516 const Tensor& maybe_get_output(int64_t output_idx) override {
14517 return *outputs_[output_idx];
14518 }
14519 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
14520};
14521at::Tensor wrapper_CPU_bitwise_left_shift_Tensor(const at::Tensor & self, const at::Tensor & other) {
14522structured_bitwise_left_shift_out_functional op;
14523op.meta(self, other);
14524op.impl(self, other, *op.outputs_[0]);
14525return std::move(op.outputs_[0]).take();
14526}
14527struct structured_bitwise_left_shift_out_out final : public at::native::structured_bitwise_left_shift_out {
14528 structured_bitwise_left_shift_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
14529 void set_output_strided(
14530 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14531 TensorOptions options, DimnameList names
14532 ) override {
14533 const auto& out = outputs_[output_idx].get();
14534 resize_out(out, sizes, strides, options);
14535 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
14536 if (C10_UNLIKELY(maybe_proxy.has_value())) {
14537 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
14538 }
14539 if (!names.empty()) {
14540 namedinference::propagate_names(outputs_[output_idx], names);
14541 }
14542 // super must happen after, so that downstream can use maybe_get_output
14543 // to retrieve the output
14544 at::native::structured_bitwise_left_shift_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14545 }
14546 void set_output_raw_strided(
14547 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14548 TensorOptions options, DimnameList names
14549 ) override {
14550 const auto& out = outputs_[output_idx].get();
14551 resize_out(out, sizes, strides, options);
14552 if (!names.empty()) {
14553 namedinference::propagate_names(outputs_[output_idx], names);
14554 }
14555 // super must happen after, so that downstream can use maybe_get_output
14556 // to retrieve the output
14557 at::native::structured_bitwise_left_shift_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14558 }
14559 const Tensor& maybe_get_output(int64_t output_idx) override {
14560 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
14561 }
14562 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
14563 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
14564};
14565at::Tensor & wrapper_CPU_bitwise_left_shift_out_Tensor_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
14566structured_bitwise_left_shift_out_out op(out);
14567op.meta(self, other);
14568op.impl(self, other, op.maybe_get_output(0));
14569if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
14570return out;
14571}
14572struct structured_bitwise_left_shift_out_inplace final : public at::native::structured_bitwise_left_shift_out {
14573 structured_bitwise_left_shift_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
14574 void set_output_strided(
14575 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14576 TensorOptions options, DimnameList names
14577 ) override {
14578 const auto& out = outputs_[output_idx].get();
14579 check_inplace(out, sizes, options);
14580 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
14581 if (C10_UNLIKELY(maybe_proxy.has_value())) {
14582 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
14583 }
14584 if (!names.empty()) {
14585 namedinference::propagate_names(outputs_[output_idx], names);
14586 }
14587 // super must happen after, so that downstream can use maybe_get_output
14588 // to retrieve the output
14589 at::native::structured_bitwise_left_shift_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14590 }
14591 void set_output_raw_strided(
14592 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14593 TensorOptions options, DimnameList names
14594 ) override {
14595 const auto& out = outputs_[output_idx].get();
14596 check_inplace(out, sizes, options);
14597 if (!names.empty()) {
14598 namedinference::propagate_names(outputs_[output_idx], names);
14599 }
14600 // super must happen after, so that downstream can use maybe_get_output
14601 // to retrieve the output
14602 at::native::structured_bitwise_left_shift_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14603 }
14604 const Tensor& maybe_get_output(int64_t output_idx) override {
14605 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
14606 }
14607 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
14608 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
14609};
14610at::Tensor & wrapper_CPU_bitwise_left_shift__Tensor(at::Tensor & self, const at::Tensor & other) {
14611structured_bitwise_left_shift_out_inplace op(self);
14612op.meta(self, other);
14613op.impl(self, other, op.outputs_[0]);
14614if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
14615return self;
14616}
14617namespace {
14618at::Tensor wrapper_CPU_Scalar___rshift__(const at::Tensor & self, const at::Scalar & other) {
14619 // No device check
14620 // DeviceGuard omitted
14621 return at::native::__rshift__(self, other);
14622}
14623} // anonymous namespace
14624namespace {
14625at::Tensor & wrapper_CPU_Scalar___irshift__(at::Tensor & self, const at::Scalar & other) {
14626 // No device check
14627 // DeviceGuard omitted
14628 return at::native::__irshift__(self, other);
14629}
14630} // anonymous namespace
14631namespace {
14632at::Tensor wrapper_CPU_Tensor___rshift__(const at::Tensor & self, const at::Tensor & other) {
14633 // No device check
14634 // DeviceGuard omitted
14635 return at::native::__rshift__(self, other);
14636}
14637} // anonymous namespace
14638namespace {
14639at::Tensor & wrapper_CPU_Tensor___irshift__(at::Tensor & self, const at::Tensor & other) {
14640 // No device check
14641 // DeviceGuard omitted
14642 return at::native::__irshift__(self, other);
14643}
14644} // anonymous namespace
14645struct structured_bitwise_right_shift_out_functional final : public at::native::structured_bitwise_right_shift_out {
14646 void set_output_strided(
14647 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14648 TensorOptions options, DimnameList names
14649 ) override {
14650 outputs_[output_idx] = create_out(sizes, strides, options);
14651 if (!names.empty()) {
14652 namedinference::propagate_names(*outputs_[output_idx], names);
14653 }
14654 // super must happen after, so that downstream can use maybe_get_output
14655 // to retrieve the output
14656 at::native::structured_bitwise_right_shift_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14657 }
14658 void set_output_raw_strided(
14659 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14660 TensorOptions options, DimnameList names
14661 ) override {
14662 outputs_[output_idx] = create_out(sizes, strides, options);
14663 if (!names.empty()) {
14664 namedinference::propagate_names(*outputs_[output_idx], names);
14665 }
14666 // super must happen after, so that downstream can use maybe_get_output
14667 // to retrieve the output
14668 at::native::structured_bitwise_right_shift_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14669 }
14670 const Tensor& maybe_get_output(int64_t output_idx) override {
14671 return *outputs_[output_idx];
14672 }
14673 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
14674};
14675at::Tensor wrapper_CPU_bitwise_right_shift_Tensor(const at::Tensor & self, const at::Tensor & other) {
14676structured_bitwise_right_shift_out_functional op;
14677op.meta(self, other);
14678op.impl(self, other, *op.outputs_[0]);
14679return std::move(op.outputs_[0]).take();
14680}
14681struct structured_bitwise_right_shift_out_out final : public at::native::structured_bitwise_right_shift_out {
14682 structured_bitwise_right_shift_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
14683 void set_output_strided(
14684 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14685 TensorOptions options, DimnameList names
14686 ) override {
14687 const auto& out = outputs_[output_idx].get();
14688 resize_out(out, sizes, strides, options);
14689 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
14690 if (C10_UNLIKELY(maybe_proxy.has_value())) {
14691 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
14692 }
14693 if (!names.empty()) {
14694 namedinference::propagate_names(outputs_[output_idx], names);
14695 }
14696 // super must happen after, so that downstream can use maybe_get_output
14697 // to retrieve the output
14698 at::native::structured_bitwise_right_shift_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14699 }
14700 void set_output_raw_strided(
14701 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14702 TensorOptions options, DimnameList names
14703 ) override {
14704 const auto& out = outputs_[output_idx].get();
14705 resize_out(out, sizes, strides, options);
14706 if (!names.empty()) {
14707 namedinference::propagate_names(outputs_[output_idx], names);
14708 }
14709 // super must happen after, so that downstream can use maybe_get_output
14710 // to retrieve the output
14711 at::native::structured_bitwise_right_shift_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14712 }
14713 const Tensor& maybe_get_output(int64_t output_idx) override {
14714 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
14715 }
14716 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
14717 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
14718};
14719at::Tensor & wrapper_CPU_bitwise_right_shift_out_Tensor_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
14720structured_bitwise_right_shift_out_out op(out);
14721op.meta(self, other);
14722op.impl(self, other, op.maybe_get_output(0));
14723if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
14724return out;
14725}
14726struct structured_bitwise_right_shift_out_inplace final : public at::native::structured_bitwise_right_shift_out {
14727 structured_bitwise_right_shift_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
14728 void set_output_strided(
14729 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14730 TensorOptions options, DimnameList names
14731 ) override {
14732 const auto& out = outputs_[output_idx].get();
14733 check_inplace(out, sizes, options);
14734 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
14735 if (C10_UNLIKELY(maybe_proxy.has_value())) {
14736 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
14737 }
14738 if (!names.empty()) {
14739 namedinference::propagate_names(outputs_[output_idx], names);
14740 }
14741 // super must happen after, so that downstream can use maybe_get_output
14742 // to retrieve the output
14743 at::native::structured_bitwise_right_shift_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14744 }
14745 void set_output_raw_strided(
14746 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14747 TensorOptions options, DimnameList names
14748 ) override {
14749 const auto& out = outputs_[output_idx].get();
14750 check_inplace(out, sizes, options);
14751 if (!names.empty()) {
14752 namedinference::propagate_names(outputs_[output_idx], names);
14753 }
14754 // super must happen after, so that downstream can use maybe_get_output
14755 // to retrieve the output
14756 at::native::structured_bitwise_right_shift_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
14757 }
14758 const Tensor& maybe_get_output(int64_t output_idx) override {
14759 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
14760 }
14761 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
14762 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
14763};
14764at::Tensor & wrapper_CPU_bitwise_right_shift__Tensor(at::Tensor & self, const at::Tensor & other) {
14765structured_bitwise_right_shift_out_inplace op(self);
14766op.meta(self, other);
14767op.impl(self, other, op.outputs_[0]);
14768if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
14769return self;
14770}
14771struct structured_tril_cpu_functional final : public at::native::structured_tril_cpu {
14772 void set_output_strided(
14773 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14774 TensorOptions options, DimnameList names
14775 ) override {
14776 outputs_[output_idx] = create_out(sizes, strides, options);
14777 if (!names.empty()) {
14778 namedinference::propagate_names(*outputs_[output_idx], names);
14779 }
14780 // super must happen after, so that downstream can use maybe_get_output
14781 // to retrieve the output
14782 }
14783 void set_output_raw_strided(
14784 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14785 TensorOptions options, DimnameList names
14786 ) override {
14787 outputs_[output_idx] = create_out(sizes, strides, options);
14788 if (!names.empty()) {
14789 namedinference::propagate_names(*outputs_[output_idx], names);
14790 }
14791 // super must happen after, so that downstream can use maybe_get_output
14792 // to retrieve the output
14793 }
14794 const Tensor& maybe_get_output(int64_t output_idx) override {
14795 return *outputs_[output_idx];
14796 }
14797 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
14798};
14799at::Tensor wrapper_CPU_tril(const at::Tensor & self, int64_t diagonal) {
14800structured_tril_cpu_functional op;
14801op.meta(self, diagonal);
14802op.impl(self, diagonal, *op.outputs_[0]);
14803return std::move(op.outputs_[0]).take();
14804}
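// NOTE: for tril (and triu below) the generated set_output_* overrides stop at
// the "super must happen after" comment without a forwarding call to the
// parent class, unlike the ops earlier in this section, presumably because
// those are TensorIterator-backed and need the base-class set_output to run,
// while tril/triu do not.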
14805struct structured_tril_cpu_out final : public at::native::structured_tril_cpu {
14806 structured_tril_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
14807 void set_output_strided(
14808 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14809 TensorOptions options, DimnameList names
14810 ) override {
14811 const auto& out = outputs_[output_idx].get();
14812 resize_out(out, sizes, strides, options);
14813 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
14814 if (C10_UNLIKELY(maybe_proxy.has_value())) {
14815 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
14816 }
14817 if (!names.empty()) {
14818 namedinference::propagate_names(outputs_[output_idx], names);
14819 }
14820 // super must happen after, so that downstream can use maybe_get_output
14821 // to retrieve the output
14822 }
14823 void set_output_raw_strided(
14824 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14825 TensorOptions options, DimnameList names
14826 ) override {
14827 const auto& out = outputs_[output_idx].get();
14828 resize_out(out, sizes, strides, options);
14829 if (!names.empty()) {
14830 namedinference::propagate_names(outputs_[output_idx], names);
14831 }
14832 // super must happen after, so that downstream can use maybe_get_output
14833 // to retrieve the output
14834 }
14835 const Tensor& maybe_get_output(int64_t output_idx) override {
14836 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
14837 }
14838 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
14839 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
14840};
14841at::Tensor & wrapper_CPU_tril_out_out(const at::Tensor & self, int64_t diagonal, at::Tensor & out) {
14842structured_tril_cpu_out op(out);
14843op.meta(self, diagonal);
14844op.impl(self, diagonal, op.maybe_get_output(0));
14845if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
14846return out;
14847}
14848struct structured_tril_cpu_inplace final : public at::native::structured_tril_cpu {
14849 structured_tril_cpu_inplace(Tensor& self) : outputs_{std::ref(self)} {}
14850 void set_output_strided(
14851 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14852 TensorOptions options, DimnameList names
14853 ) override {
14854 const auto& out = outputs_[output_idx].get();
14855 check_inplace(out, sizes, options);
14856 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
14857 if (C10_UNLIKELY(maybe_proxy.has_value())) {
14858 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
14859 }
14860 if (!names.empty()) {
14861 namedinference::propagate_names(outputs_[output_idx], names);
14862 }
14863 // super must happen after, so that downstream can use maybe_get_output
14864 // to retrieve the output
14865 }
14866 void set_output_raw_strided(
14867 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14868 TensorOptions options, DimnameList names
14869 ) override {
14870 const auto& out = outputs_[output_idx].get();
14871 check_inplace(out, sizes, options);
14872 if (!names.empty()) {
14873 namedinference::propagate_names(outputs_[output_idx], names);
14874 }
14875 // super must happen after, so that downstream can use maybe_get_output
14876 // to retrieve the output
14877 }
14878 const Tensor& maybe_get_output(int64_t output_idx) override {
14879 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
14880 }
14881 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
14882 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
14883};
14884at::Tensor & wrapper_CPU_tril_(at::Tensor & self, int64_t diagonal) {
14885structured_tril_cpu_inplace op(self);
14886op.meta(self, diagonal);
14887op.impl(self, diagonal, op.outputs_[0]);
14888if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
14889return self;
14890}
14891struct structured_triu_cpu_functional final : public at::native::structured_triu_cpu {
14892 void set_output_strided(
14893 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14894 TensorOptions options, DimnameList names
14895 ) override {
14896 outputs_[output_idx] = create_out(sizes, strides, options);
14897 if (!names.empty()) {
14898 namedinference::propagate_names(*outputs_[output_idx], names);
14899 }
14900 // super must happen after, so that downstream can use maybe_get_output
14901 // to retrieve the output
14902 }
14903 void set_output_raw_strided(
14904 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14905 TensorOptions options, DimnameList names
14906 ) override {
14907 outputs_[output_idx] = create_out(sizes, strides, options);
14908 if (!names.empty()) {
14909 namedinference::propagate_names(*outputs_[output_idx], names);
14910 }
14911 // super must happen after, so that downstream can use maybe_get_output
14912 // to retrieve the output
14913 }
14914 const Tensor& maybe_get_output(int64_t output_idx) override {
14915 return *outputs_[output_idx];
14916 }
14917 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
14918};
14919at::Tensor wrapper_CPU_triu(const at::Tensor & self, int64_t diagonal) {
14920structured_triu_cpu_functional op;
14921op.meta(self, diagonal);
14922op.impl(self, diagonal, *op.outputs_[0]);
14923return std::move(op.outputs_[0]).take();
14924}
14925struct structured_triu_cpu_out final : public at::native::structured_triu_cpu {
14926 structured_triu_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
14927 void set_output_strided(
14928 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14929 TensorOptions options, DimnameList names
14930 ) override {
14931 const auto& out = outputs_[output_idx].get();
14932 resize_out(out, sizes, strides, options);
14933 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
14934 if (C10_UNLIKELY(maybe_proxy.has_value())) {
14935 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
14936 }
14937 if (!names.empty()) {
14938 namedinference::propagate_names(outputs_[output_idx], names);
14939 }
14940 // super must happen after, so that downstream can use maybe_get_output
14941 // to retrieve the output
14942 }
14943 void set_output_raw_strided(
14944 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14945 TensorOptions options, DimnameList names
14946 ) override {
14947 const auto& out = outputs_[output_idx].get();
14948 resize_out(out, sizes, strides, options);
14949 if (!names.empty()) {
14950 namedinference::propagate_names(outputs_[output_idx], names);
14951 }
14952 // super must happen after, so that downstream can use maybe_get_output
14953 // to retrieve the output
14954 }
14955 const Tensor& maybe_get_output(int64_t output_idx) override {
14956 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
14957 }
14958 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
14959 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
14960};
14961at::Tensor & wrapper_CPU_triu_out_out(const at::Tensor & self, int64_t diagonal, at::Tensor & out) {
14962structured_triu_cpu_out op(out);
14963op.meta(self, diagonal);
14964op.impl(self, diagonal, op.maybe_get_output(0));
14965if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
14966return out;
14967}
14968struct structured_triu_cpu_inplace final : public at::native::structured_triu_cpu {
14969 structured_triu_cpu_inplace(Tensor& self) : outputs_{std::ref(self)} {}
14970 void set_output_strided(
14971 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14972 TensorOptions options, DimnameList names
14973 ) override {
14974 const auto& out = outputs_[output_idx].get();
14975 check_inplace(out, sizes, options);
14976 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
14977 if (C10_UNLIKELY(maybe_proxy.has_value())) {
14978 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
14979 }
14980 if (!names.empty()) {
14981 namedinference::propagate_names(outputs_[output_idx], names);
14982 }
14983 // super must happen after, so that downstream can use maybe_get_output
14984 // to retrieve the output
14985 }
14986 void set_output_raw_strided(
14987 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
14988 TensorOptions options, DimnameList names
14989 ) override {
14990 const auto& out = outputs_[output_idx].get();
14991 check_inplace(out, sizes, options);
14992 if (!names.empty()) {
14993 namedinference::propagate_names(outputs_[output_idx], names);
14994 }
14995 // super must happen after, so that downstream can use maybe_get_output
14996 // to retrieve the output
14997 }
14998 const Tensor& maybe_get_output(int64_t output_idx) override {
14999 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
15000 }
15001 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
15002 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
15003};
15004at::Tensor & wrapper_CPU_triu_(at::Tensor & self, int64_t diagonal) {
15005structured_triu_cpu_inplace op(self);
15006op.meta(self, diagonal);
15007op.impl(self, diagonal, op.outputs_[0]);
15008if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
15009return self;
15010}
15011struct structured_digamma_out_functional final : public at::native::structured_digamma_out {
15012 void set_output_strided(
15013 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15014 TensorOptions options, DimnameList names
15015 ) override {
15016 outputs_[output_idx] = create_out(sizes, strides, options);
15017 if (!names.empty()) {
15018 namedinference::propagate_names(*outputs_[output_idx], names);
15019 }
15020 // super must happen after, so that downstream can use maybe_get_output
15021 // to retrieve the output
15022 at::native::structured_digamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
15023 }
15024 void set_output_raw_strided(
15025 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15026 TensorOptions options, DimnameList names
15027 ) override {
15028 outputs_[output_idx] = create_out(sizes, strides, options);
15029 if (!names.empty()) {
15030 namedinference::propagate_names(*outputs_[output_idx], names);
15031 }
15032 // super must happen after, so that downstream can use maybe_get_output
15033 // to retrieve the output
15034 at::native::structured_digamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
15035 }
15036 const Tensor& maybe_get_output(int64_t output_idx) override {
15037 return *outputs_[output_idx];
15038 }
15039 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
15040};
15041at::Tensor wrapper_CPU_digamma(const at::Tensor & self) {
15042structured_digamma_out_functional op;
15043op.meta(self);
15044op.impl(self, *op.outputs_[0]);
15045return std::move(op.outputs_[0]).take();
15046}
15047struct structured_digamma_out_out final : public at::native::structured_digamma_out {
15048 structured_digamma_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
15049 void set_output_strided(
15050 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15051 TensorOptions options, DimnameList names
15052 ) override {
15053 const auto& out = outputs_[output_idx].get();
15054 resize_out(out, sizes, strides, options);
15055 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
15056 if (C10_UNLIKELY(maybe_proxy.has_value())) {
15057 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
15058 }
15059 if (!names.empty()) {
15060 namedinference::propagate_names(outputs_[output_idx], names);
15061 }
15062 // super must happen after, so that downstream can use maybe_get_output
15063 // to retrieve the output
15064 at::native::structured_digamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
15065 }
15066 void set_output_raw_strided(
15067 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15068 TensorOptions options, DimnameList names
15069 ) override {
15070 const auto& out = outputs_[output_idx].get();
15071 resize_out(out, sizes, strides, options);
15072 if (!names.empty()) {
15073 namedinference::propagate_names(outputs_[output_idx], names);
15074 }
15075 // super must happen after, so that downstream can use maybe_get_output
15076 // to retrieve the output
15077 at::native::structured_digamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
15078 }
15079 const Tensor& maybe_get_output(int64_t output_idx) override {
15080 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
15081 }
15082 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
15083 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
15084};
15085at::Tensor & wrapper_CPU_digamma_out_out(const at::Tensor & self, at::Tensor & out) {
15086structured_digamma_out_out op(out);
15087op.meta(self);
15088op.impl(self, op.maybe_get_output(0));
15089if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
15090return out;
15091}
15092struct structured_digamma_out_inplace final : public at::native::structured_digamma_out {
15093 structured_digamma_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
15094 void set_output_strided(
15095 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15096 TensorOptions options, DimnameList names
15097 ) override {
15098 const auto& out = outputs_[output_idx].get();
15099 check_inplace(out, sizes, options);
15100 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
15101 if (C10_UNLIKELY(maybe_proxy.has_value())) {
15102 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
15103 }
15104 if (!names.empty()) {
15105 namedinference::propagate_names(outputs_[output_idx], names);
15106 }
15107 // super must happen after, so that downstream can use maybe_get_output
15108 // to retrieve the output
15109 at::native::structured_digamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
15110 }
15111 void set_output_raw_strided(
15112 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15113 TensorOptions options, DimnameList names
15114 ) override {
15115 const auto& out = outputs_[output_idx].get();
15116 check_inplace(out, sizes, options);
15117 if (!names.empty()) {
15118 namedinference::propagate_names(outputs_[output_idx], names);
15119 }
15120 // super must happen after, so that downstream can use maybe_get_output
15121 // to retrieve the output
15122 at::native::structured_digamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
15123 }
15124 const Tensor& maybe_get_output(int64_t output_idx) override {
15125 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
15126 }
15127 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
15128 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
15129};
15130at::Tensor & wrapper_CPU_digamma_(at::Tensor & self) {
15131structured_digamma_out_inplace op(self);
15132op.meta(self);
15133op.impl(self, op.outputs_[0]);
15134if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
15135return self;
15136}
15137struct structured_lerp_Scalar_functional final : public at::native::structured_lerp_Scalar {
15138 void set_output_strided(
15139 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15140 TensorOptions options, DimnameList names
15141 ) override {
15142 outputs_[output_idx] = create_out(sizes, strides, options);
15143 if (!names.empty()) {
15144 namedinference::propagate_names(*outputs_[output_idx], names);
15145 }
15146 // super must happen after, so that downstream can use maybe_get_output
15147 // to retrieve the output
15148 at::native::structured_lerp_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
15149 }
15150 void set_output_raw_strided(
15151 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15152 TensorOptions options, DimnameList names
15153 ) override {
15154 outputs_[output_idx] = create_out(sizes, strides, options);
15155 if (!names.empty()) {
15156 namedinference::propagate_names(*outputs_[output_idx], names);
15157 }
15158 // super must happen after, so that downstream can use maybe_get_output
15159 // to retrieve the output
15160 at::native::structured_lerp_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
15161 }
15162 const Tensor& maybe_get_output(int64_t output_idx) override {
15163 return *outputs_[output_idx];
15164 }
15165 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
15166};
15167at::Tensor wrapper_CPU_lerp_Scalar(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
15168structured_lerp_Scalar_functional op;
15169op.meta(self, end, weight);
15170op.impl(self, end, weight, *op.outputs_[0]);
15171return std::move(op.outputs_[0]).take();
15172}
15173struct structured_lerp_Scalar_out final : public at::native::structured_lerp_Scalar {
15174 structured_lerp_Scalar_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
15175 void set_output_strided(
15176 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15177 TensorOptions options, DimnameList names
15178 ) override {
15179 const auto& out = outputs_[output_idx].get();
15180 resize_out(out, sizes, strides, options);
15181 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
15182 if (C10_UNLIKELY(maybe_proxy.has_value())) {
15183 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
15184 }
15185 if (!names.empty()) {
15186 namedinference::propagate_names(outputs_[output_idx], names);
15187 }
15188 // super must happen after, so that downstream can use maybe_get_output
15189 // to retrieve the output
15190 at::native::structured_lerp_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
15191 }
15192 void set_output_raw_strided(
15193 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15194 TensorOptions options, DimnameList names
15195 ) override {
15196 const auto& out = outputs_[output_idx].get();
15197 resize_out(out, sizes, strides, options);
15198 if (!names.empty()) {
15199 namedinference::propagate_names(outputs_[output_idx], names);
15200 }
15201 // super must happen after, so that downstream can use maybe_get_output
15202 // to retrieve the output
15203 at::native::structured_lerp_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
15204 }
15205 const Tensor& maybe_get_output(int64_t output_idx) override {
15206 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
15207 }
15208 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
15209 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
15210};
15211at::Tensor & wrapper_CPU_lerp_out_Scalar_out(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out) {
15212structured_lerp_Scalar_out op(out);
15213op.meta(self, end, weight);
15214op.impl(self, end, weight, op.maybe_get_output(0));
15215if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
15216return out;
15217}
15218struct structured_lerp_Scalar_inplace final : public at::native::structured_lerp_Scalar {
15219 structured_lerp_Scalar_inplace(Tensor& self) : outputs_{std::ref(self)} {}
15220 void set_output_strided(
15221 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15222 TensorOptions options, DimnameList names
15223 ) override {
15224 const auto& out = outputs_[output_idx].get();
15225 check_inplace(out, sizes, options);
15226 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
15227 if (C10_UNLIKELY(maybe_proxy.has_value())) {
15228 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
15229 }
15230 if (!names.empty()) {
15231 namedinference::propagate_names(outputs_[output_idx], names);
15232 }
15233 // super must happen after, so that downstream can use maybe_get_output
15234 // to retrieve the output
15235 at::native::structured_lerp_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
15236 }
15237 void set_output_raw_strided(
15238 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15239 TensorOptions options, DimnameList names
15240 ) override {
15241 const auto& out = outputs_[output_idx].get();
15242 check_inplace(out, sizes, options);
15243 if (!names.empty()) {
15244 namedinference::propagate_names(outputs_[output_idx], names);
15245 }
15246 // super must happen after, so that downstream can use maybe_get_output
15247 // to retrieve the output
15248 at::native::structured_lerp_Scalar::set_output_raw_strided(output_idx, sizes, strides, options, names);
15249 }
15250 const Tensor& maybe_get_output(int64_t output_idx) override {
15251 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
15252 }
15253 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
15254 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
15255};
15256at::Tensor & wrapper_CPU_lerp__Scalar(at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
15257structured_lerp_Scalar_inplace op(self);
15258op.meta(self, end, weight);
15259op.impl(self, end, weight, op.outputs_[0]);
15260if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
15261return self;
15262}
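// NOTE: lerp is generated twice: structured_lerp_Scalar above takes a Scalar
// `weight`, while structured_lerp_Tensor below takes a Tensor `weight`. Both
// follow the same functional/out/inplace pattern; only the meta()/impl()
// signatures differ.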
15263struct structured_lerp_Tensor_functional final : public at::native::structured_lerp_Tensor {
15264 void set_output_strided(
15265 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15266 TensorOptions options, DimnameList names
15267 ) override {
15268 outputs_[output_idx] = create_out(sizes, strides, options);
15269 if (!names.empty()) {
15270 namedinference::propagate_names(*outputs_[output_idx], names);
15271 }
15272 // super must happen after, so that downstream can use maybe_get_output
15273 // to retrieve the output
15274 at::native::structured_lerp_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
15275 }
15276 void set_output_raw_strided(
15277 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15278 TensorOptions options, DimnameList names
15279 ) override {
15280 outputs_[output_idx] = create_out(sizes, strides, options);
15281 if (!names.empty()) {
15282 namedinference::propagate_names(*outputs_[output_idx], names);
15283 }
15284 // super must happen after, so that downstream can use maybe_get_output
15285 // to retrieve the output
15286 at::native::structured_lerp_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
15287 }
15288 const Tensor& maybe_get_output(int64_t output_idx) override {
15289 return *outputs_[output_idx];
15290 }
15291 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
15292};
15293at::Tensor wrapper_CPU_lerp_Tensor(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
15294structured_lerp_Tensor_functional op;
15295op.meta(self, end, weight);
15296op.impl(self, end, weight, *op.outputs_[0]);
15297return std::move(op.outputs_[0]).take();
15298}
15299struct structured_lerp_Tensor_out final : public at::native::structured_lerp_Tensor {
15300 structured_lerp_Tensor_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
15301 void set_output_strided(
15302 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15303 TensorOptions options, DimnameList names
15304 ) override {
15305 const auto& out = outputs_[output_idx].get();
15306 resize_out(out, sizes, strides, options);
15307 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
15308 if (C10_UNLIKELY(maybe_proxy.has_value())) {
15309 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
15310 }
15311 if (!names.empty()) {
15312 namedinference::propagate_names(outputs_[output_idx], names);
15313 }
15314 // super must happen after, so that downstream can use maybe_get_output
15315 // to retrieve the output
15316 at::native::structured_lerp_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
15317 }
15318 void set_output_raw_strided(
15319 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15320 TensorOptions options, DimnameList names
15321 ) override {
15322 const auto& out = outputs_[output_idx].get();
15323 resize_out(out, sizes, strides, options);
15324 if (!names.empty()) {
15325 namedinference::propagate_names(outputs_[output_idx], names);
15326 }
15327 // super must happen after, so that downstream can use maybe_get_output
15328 // to retrieve the output
15329 at::native::structured_lerp_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
15330 }
15331 const Tensor& maybe_get_output(int64_t output_idx) override {
15332 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
15333 }
15334 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
15335 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
15336};
15337at::Tensor & wrapper_CPU_lerp_out_Tensor_out(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out) {
15338structured_lerp_Tensor_out op(out);
15339op.meta(self, end, weight);
15340op.impl(self, end, weight, op.maybe_get_output(0));
15341if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
15342return out;
15343}
15344struct structured_lerp_Tensor_inplace final : public at::native::structured_lerp_Tensor {
15345 structured_lerp_Tensor_inplace(Tensor& self) : outputs_{std::ref(self)} {}
15346 void set_output_strided(
15347 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15348 TensorOptions options, DimnameList names
15349 ) override {
15350 const auto& out = outputs_[output_idx].get();
15351 check_inplace(out, sizes, options);
15352 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
15353 if (C10_UNLIKELY(maybe_proxy.has_value())) {
15354 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
15355 }
15356 if (!names.empty()) {
15357 namedinference::propagate_names(outputs_[output_idx], names);
15358 }
15359 // super must happen after, so that downstream can use maybe_get_output
15360 // to retrieve the output
15361 at::native::structured_lerp_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
15362 }
15363 void set_output_raw_strided(
15364 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15365 TensorOptions options, DimnameList names
15366 ) override {
15367 const auto& out = outputs_[output_idx].get();
15368 check_inplace(out, sizes, options);
15369 if (!names.empty()) {
15370 namedinference::propagate_names(outputs_[output_idx], names);
15371 }
15372 // super must happen after, so that downstream can use maybe_get_output
15373 // to retrieve the output
15374 at::native::structured_lerp_Tensor::set_output_raw_strided(output_idx, sizes, strides, options, names);
15375 }
15376 const Tensor& maybe_get_output(int64_t output_idx) override {
15377 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
15378 }
15379 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
15380 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
15381};
15382at::Tensor & wrapper_CPU_lerp__Tensor(at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
15383structured_lerp_Tensor_inplace op(self);
15384op.meta(self, end, weight);
15385op.impl(self, end, weight, op.outputs_[0]);
15386if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
15387return self;
15388}
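// NOTE: the wrappers in the anonymous namespaces below are for non-structured ops:
// each forwards directly to the corresponding at::native implementation with no
// meta/impl split and, as the generated comments state, no device check and no
// DeviceGuard.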
15389namespace {
15390at::Tensor wrapper_CPU__addbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
15391 // No device check
15392 // DeviceGuard omitted
15393 return at::native::addbmm(self, batch1, batch2, beta, alpha);
15394}
15395} // anonymous namespace
15396namespace {
15397at::Tensor & wrapper_CPU_out_addbmm_out(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
15398 // No device check
15399 // DeviceGuard omitted
15400 return at::native::addbmm_out(self, batch1, batch2, beta, alpha, out);
15401}
15402} // anonymous namespace
15403namespace {
15404at::Tensor & wrapper_CPU__addbmm_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
15405 // No device check
15406 // DeviceGuard omitted
15407 return at::native::addbmm_(self, batch1, batch2, beta, alpha);
15408}
15409} // anonymous namespace
15410namespace {
15411at::Tensor & wrapper_CPU_from_random_(at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator) {
15412 // No device check
15413 // DeviceGuard omitted
15414 return at::native::random_(self, from, to, generator);
15415}
15416} // anonymous namespace
15417namespace {
15418at::Tensor & wrapper_CPU_to_random_(at::Tensor & self, int64_t to, c10::optional<at::Generator> generator) {
15419 // No device check
15420 // DeviceGuard omitted
15421 return at::native::random_(self, to, generator);
15422}
15423} // anonymous namespace
15424namespace {
15425at::Tensor & wrapper_CPU__random_(at::Tensor & self, c10::optional<at::Generator> generator) {
15426 // No device check
15427 // DeviceGuard omitted
15428 return at::native::random_(self, generator);
15429}
15430} // anonymous namespace
15431namespace {
15432at::Tensor & wrapper_CPU__uniform_(at::Tensor & self, double from, double to, c10::optional<at::Generator> generator) {
15433 // No device check
15434 // DeviceGuard omitted
15435 return at::native::uniform_(self, from, to, generator);
15436}
15437} // anonymous namespace
15438namespace {
15439at::Tensor & wrapper_CPU__cauchy_(at::Tensor & self, double median, double sigma, c10::optional<at::Generator> generator) {
15440 // No device check
15441 // DeviceGuard omitted
15442 return at::native::cauchy_(self, median, sigma, generator);
15443}
15444} // anonymous namespace
15445namespace {
15446at::Tensor & wrapper_CPU__log_normal_(at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
15447 // No device check
15448 // DeviceGuard omitted
15449 return at::native::log_normal_(self, mean, std, generator);
15450}
15451} // anonymous namespace
15452namespace {
15453at::Tensor & wrapper_CPU__exponential_(at::Tensor & self, double lambd, c10::optional<at::Generator> generator) {
15454 // No device check
15455 // DeviceGuard omitted
15456 return at::native::exponential_(self, lambd, generator);
15457}
15458} // anonymous namespace
15459namespace {
15460at::Tensor & wrapper_CPU__geometric_(at::Tensor & self, double p, c10::optional<at::Generator> generator) {
15461 // No device check
15462 // DeviceGuard omitted
15463 return at::native::geometric_(self, p, generator);
15464}
15465} // anonymous namespace
15466namespace {
15467at::Tensor wrapper_CPU__tril_indices(int64_t row, int64_t col, int64_t offset, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
15468 // No device check
15469 // DeviceGuard omitted
15470 return at::native::tril_indices_cpu(row, col, offset, dtype, layout, device, pin_memory);
15471}
15472} // anonymous namespace
15473namespace {
15474at::Tensor wrapper_CPU__triu_indices(int64_t row, int64_t col, int64_t offset, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
15475 // No device check
15476 // DeviceGuard omitted
15477 return at::native::triu_indices_cpu(row, col, offset, dtype, layout, device, pin_memory);
15478}
15479} // anonymous namespace
15480namespace {
15481at::Tensor wrapper_CPU__trace(const at::Tensor & self) {
15482 // No device check
15483 // DeviceGuard omitted
15484 return at::native::trace_cpu(self);
15485}
15486} // anonymous namespace
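// NOTE: the wrappers that follow cover the comparison ops (ne, ge, le, gt). Each op
// gets the same three structured variants shown above for lerp: a functional
// wrapper that allocates its output with create_out, an out wrapper that resizes or
// proxies the caller's tensor, and an inplace wrapper that writes back into `self`.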
15487struct structured_ne_Scalar_out_functional final : public at::native::structured_ne_Scalar_out {
15488 void set_output_strided(
15489 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15490 TensorOptions options, DimnameList names
15491 ) override {
15492 outputs_[output_idx] = create_out(sizes, strides, options);
15493 if (!names.empty()) {
15494 namedinference::propagate_names(*outputs_[output_idx], names);
15495 }
15496 // super must happen after, so that downstream can use maybe_get_output
15497 // to retrieve the output
15498 at::native::structured_ne_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
15499 }
15500 void set_output_raw_strided(
15501 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15502 TensorOptions options, DimnameList names
15503 ) override {
15504 outputs_[output_idx] = create_out(sizes, strides, options);
15505 if (!names.empty()) {
15506 namedinference::propagate_names(*outputs_[output_idx], names);
15507 }
15508 // super must happen after, so that downstream can use maybe_get_output
15509 // to retrieve the output
15510 at::native::structured_ne_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
15511 }
15512 const Tensor& maybe_get_output(int64_t output_idx) override {
15513 return *outputs_[output_idx];
15514 }
15515 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
15516};
15517at::Tensor wrapper_CPU_ne_Scalar(const at::Tensor & self, const at::Scalar & other) {
15518structured_ne_Scalar_out_functional op;
15519op.meta(self, other);
15520op.impl(self, other, *op.outputs_[0]);
15521return std::move(op.outputs_[0]).take();
15522}
15523struct structured_ne_Scalar_out_out final : public at::native::structured_ne_Scalar_out {
15524 structured_ne_Scalar_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
15525 void set_output_strided(
15526 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15527 TensorOptions options, DimnameList names
15528 ) override {
15529 const auto& out = outputs_[output_idx].get();
15530 resize_out(out, sizes, strides, options);
15531 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
15532 if (C10_UNLIKELY(maybe_proxy.has_value())) {
15533 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
15534 }
15535 if (!names.empty()) {
15536 namedinference::propagate_names(outputs_[output_idx], names);
15537 }
15538 // super must happen after, so that downstream can use maybe_get_output
15539 // to retrieve the output
15540 at::native::structured_ne_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
15541 }
15542 void set_output_raw_strided(
15543 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15544 TensorOptions options, DimnameList names
15545 ) override {
15546 const auto& out = outputs_[output_idx].get();
15547 resize_out(out, sizes, strides, options);
15548 if (!names.empty()) {
15549 namedinference::propagate_names(outputs_[output_idx], names);
15550 }
15551 // super must happen after, so that downstream can use maybe_get_output
15552 // to retrieve the output
15553 at::native::structured_ne_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
15554 }
15555 const Tensor& maybe_get_output(int64_t output_idx) override {
15556 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
15557 }
15558 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
15559 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
15560};
15561at::Tensor & wrapper_CPU_ne_out_Scalar_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
15562structured_ne_Scalar_out_out op(out);
15563op.meta(self, other);
15564op.impl(self, other, op.maybe_get_output(0));
15565if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
15566return out;
15567}
15568struct structured_ne_Scalar_out_inplace final : public at::native::structured_ne_Scalar_out {
15569 structured_ne_Scalar_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
15570 void set_output_strided(
15571 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15572 TensorOptions options, DimnameList names
15573 ) override {
15574 const auto& out = outputs_[output_idx].get();
15575 check_inplace(out, sizes, options);
15576 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
15577 if (C10_UNLIKELY(maybe_proxy.has_value())) {
15578 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
15579 }
15580 if (!names.empty()) {
15581 namedinference::propagate_names(outputs_[output_idx], names);
15582 }
15583 // super must happen after, so that downstream can use maybe_get_output
15584 // to retrieve the output
15585 at::native::structured_ne_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
15586 }
15587 void set_output_raw_strided(
15588 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15589 TensorOptions options, DimnameList names
15590 ) override {
15591 const auto& out = outputs_[output_idx].get();
15592 check_inplace(out, sizes, options);
15593 if (!names.empty()) {
15594 namedinference::propagate_names(outputs_[output_idx], names);
15595 }
15596 // super must happen after, so that downstream can use maybe_get_output
15597 // to retrieve the output
15598 at::native::structured_ne_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
15599 }
15600 const Tensor& maybe_get_output(int64_t output_idx) override {
15601 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
15602 }
15603 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
15604 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
15605};
15606at::Tensor & wrapper_CPU_ne__Scalar(at::Tensor & self, const at::Scalar & other) {
15607structured_ne_Scalar_out_inplace op(self);
15608op.meta(self, other);
15609op.impl(self, other, op.outputs_[0]);
15610if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
15611return self;
15612}
15613struct structured_ne_Tensor_out_functional final : public at::native::structured_ne_Tensor_out {
15614 void set_output_strided(
15615 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15616 TensorOptions options, DimnameList names
15617 ) override {
15618 outputs_[output_idx] = create_out(sizes, strides, options);
15619 if (!names.empty()) {
15620 namedinference::propagate_names(*outputs_[output_idx], names);
15621 }
15622 // super must happen after, so that downstream can use maybe_get_output
15623 // to retrieve the output
15624 at::native::structured_ne_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
15625 }
15626 void set_output_raw_strided(
15627 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15628 TensorOptions options, DimnameList names
15629 ) override {
15630 outputs_[output_idx] = create_out(sizes, strides, options);
15631 if (!names.empty()) {
15632 namedinference::propagate_names(*outputs_[output_idx], names);
15633 }
15634 // super must happen after, so that downstream can use maybe_get_output
15635 // to retrieve the output
15636 at::native::structured_ne_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
15637 }
15638 const Tensor& maybe_get_output(int64_t output_idx) override {
15639 return *outputs_[output_idx];
15640 }
15641 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
15642};
15643at::Tensor wrapper_CPU_ne_Tensor(const at::Tensor & self, const at::Tensor & other) {
15644structured_ne_Tensor_out_functional op;
15645op.meta(self, other);
15646op.impl(self, other, *op.outputs_[0]);
15647return std::move(op.outputs_[0]).take();
15648}
15649struct structured_ne_Tensor_out_out final : public at::native::structured_ne_Tensor_out {
15650 structured_ne_Tensor_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
15651 void set_output_strided(
15652 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15653 TensorOptions options, DimnameList names
15654 ) override {
15655 const auto& out = outputs_[output_idx].get();
15656 resize_out(out, sizes, strides, options);
15657 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
15658 if (C10_UNLIKELY(maybe_proxy.has_value())) {
15659 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
15660 }
15661 if (!names.empty()) {
15662 namedinference::propagate_names(outputs_[output_idx], names);
15663 }
15664 // super must happen after, so that downstream can use maybe_get_output
15665 // to retrieve the output
15666 at::native::structured_ne_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
15667 }
15668 void set_output_raw_strided(
15669 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15670 TensorOptions options, DimnameList names
15671 ) override {
15672 const auto& out = outputs_[output_idx].get();
15673 resize_out(out, sizes, strides, options);
15674 if (!names.empty()) {
15675 namedinference::propagate_names(outputs_[output_idx], names);
15676 }
15677 // super must happen after, so that downstream can use maybe_get_output
15678 // to retrieve the output
15679 at::native::structured_ne_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
15680 }
15681 const Tensor& maybe_get_output(int64_t output_idx) override {
15682 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
15683 }
15684 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
15685 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
15686};
15687at::Tensor & wrapper_CPU_ne_out_Tensor_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
15688structured_ne_Tensor_out_out op(out);
15689op.meta(self, other);
15690op.impl(self, other, op.maybe_get_output(0));
15691if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
15692return out;
15693}
15694struct structured_ne_Tensor_out_inplace final : public at::native::structured_ne_Tensor_out {
15695 structured_ne_Tensor_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
15696 void set_output_strided(
15697 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15698 TensorOptions options, DimnameList names
15699 ) override {
15700 const auto& out = outputs_[output_idx].get();
15701 check_inplace(out, sizes, options);
15702 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
15703 if (C10_UNLIKELY(maybe_proxy.has_value())) {
15704 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
15705 }
15706 if (!names.empty()) {
15707 namedinference::propagate_names(outputs_[output_idx], names);
15708 }
15709 // super must happen after, so that downstream can use maybe_get_output
15710 // to retrieve the output
15711 at::native::structured_ne_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
15712 }
15713 void set_output_raw_strided(
15714 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15715 TensorOptions options, DimnameList names
15716 ) override {
15717 const auto& out = outputs_[output_idx].get();
15718 check_inplace(out, sizes, options);
15719 if (!names.empty()) {
15720 namedinference::propagate_names(outputs_[output_idx], names);
15721 }
15722 // super must happen after, so that downstream can use maybe_get_output
15723 // to retrieve the output
15724 at::native::structured_ne_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
15725 }
15726 const Tensor& maybe_get_output(int64_t output_idx) override {
15727 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
15728 }
15729 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
15730 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
15731};
15732at::Tensor & wrapper_CPU_ne__Tensor(at::Tensor & self, const at::Tensor & other) {
15733structured_ne_Tensor_out_inplace op(self);
15734op.meta(self, other);
15735op.impl(self, other, op.outputs_[0]);
15736if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
15737return self;
15738}
15739struct structured_ge_Scalar_out_functional final : public at::native::structured_ge_Scalar_out {
15740 void set_output_strided(
15741 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15742 TensorOptions options, DimnameList names
15743 ) override {
15744 outputs_[output_idx] = create_out(sizes, strides, options);
15745 if (!names.empty()) {
15746 namedinference::propagate_names(*outputs_[output_idx], names);
15747 }
15748 // super must happen after, so that downstream can use maybe_get_output
15749 // to retrieve the output
15750 at::native::structured_ge_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
15751 }
15752 void set_output_raw_strided(
15753 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15754 TensorOptions options, DimnameList names
15755 ) override {
15756 outputs_[output_idx] = create_out(sizes, strides, options);
15757 if (!names.empty()) {
15758 namedinference::propagate_names(*outputs_[output_idx], names);
15759 }
15760 // super must happen after, so that downstream can use maybe_get_output
15761 // to retrieve the output
15762 at::native::structured_ge_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
15763 }
15764 const Tensor& maybe_get_output(int64_t output_idx) override {
15765 return *outputs_[output_idx];
15766 }
15767 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
15768};
15769at::Tensor wrapper_CPU_ge_Scalar(const at::Tensor & self, const at::Scalar & other) {
15770structured_ge_Scalar_out_functional op;
15771op.meta(self, other);
15772op.impl(self, other, *op.outputs_[0]);
15773return std::move(op.outputs_[0]).take();
15774}
15775struct structured_ge_Scalar_out_out final : public at::native::structured_ge_Scalar_out {
15776 structured_ge_Scalar_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
15777 void set_output_strided(
15778 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15779 TensorOptions options, DimnameList names
15780 ) override {
15781 const auto& out = outputs_[output_idx].get();
15782 resize_out(out, sizes, strides, options);
15783 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
15784 if (C10_UNLIKELY(maybe_proxy.has_value())) {
15785 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
15786 }
15787 if (!names.empty()) {
15788 namedinference::propagate_names(outputs_[output_idx], names);
15789 }
15790 // super must happen after, so that downstream can use maybe_get_output
15791 // to retrieve the output
15792 at::native::structured_ge_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
15793 }
15794 void set_output_raw_strided(
15795 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15796 TensorOptions options, DimnameList names
15797 ) override {
15798 const auto& out = outputs_[output_idx].get();
15799 resize_out(out, sizes, strides, options);
15800 if (!names.empty()) {
15801 namedinference::propagate_names(outputs_[output_idx], names);
15802 }
15803 // super must happen after, so that downstream can use maybe_get_output
15804 // to retrieve the output
15805 at::native::structured_ge_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
15806 }
15807 const Tensor& maybe_get_output(int64_t output_idx) override {
15808 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
15809 }
15810 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
15811 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
15812};
15813at::Tensor & wrapper_CPU_ge_out_Scalar_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
15814structured_ge_Scalar_out_out op(out);
15815op.meta(self, other);
15816op.impl(self, other, op.maybe_get_output(0));
15817if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
15818return out;
15819}
15820struct structured_ge_Scalar_out_inplace final : public at::native::structured_ge_Scalar_out {
15821 structured_ge_Scalar_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
15822 void set_output_strided(
15823 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15824 TensorOptions options, DimnameList names
15825 ) override {
15826 const auto& out = outputs_[output_idx].get();
15827 check_inplace(out, sizes, options);
15828 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
15829 if (C10_UNLIKELY(maybe_proxy.has_value())) {
15830 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
15831 }
15832 if (!names.empty()) {
15833 namedinference::propagate_names(outputs_[output_idx], names);
15834 }
15835 // super must happen after, so that downstream can use maybe_get_output
15836 // to retrieve the output
15837 at::native::structured_ge_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
15838 }
15839 void set_output_raw_strided(
15840 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15841 TensorOptions options, DimnameList names
15842 ) override {
15843 const auto& out = outputs_[output_idx].get();
15844 check_inplace(out, sizes, options);
15845 if (!names.empty()) {
15846 namedinference::propagate_names(outputs_[output_idx], names);
15847 }
15848 // super must happen after, so that downstream can use maybe_get_output
15849 // to retrieve the output
15850 at::native::structured_ge_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
15851 }
15852 const Tensor& maybe_get_output(int64_t output_idx) override {
15853 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
15854 }
15855 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
15856 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
15857};
15858at::Tensor & wrapper_CPU_ge__Scalar(at::Tensor & self, const at::Scalar & other) {
15859structured_ge_Scalar_out_inplace op(self);
15860op.meta(self, other);
15861op.impl(self, other, op.outputs_[0]);
15862if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
15863return self;
15864}
15865struct structured_ge_Tensor_out_functional final : public at::native::structured_ge_Tensor_out {
15866 void set_output_strided(
15867 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15868 TensorOptions options, DimnameList names
15869 ) override {
15870 outputs_[output_idx] = create_out(sizes, strides, options);
15871 if (!names.empty()) {
15872 namedinference::propagate_names(*outputs_[output_idx], names);
15873 }
15874 // super must happen after, so that downstream can use maybe_get_output
15875 // to retrieve the output
15876 at::native::structured_ge_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
15877 }
15878 void set_output_raw_strided(
15879 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15880 TensorOptions options, DimnameList names
15881 ) override {
15882 outputs_[output_idx] = create_out(sizes, strides, options);
15883 if (!names.empty()) {
15884 namedinference::propagate_names(*outputs_[output_idx], names);
15885 }
15886 // super must happen after, so that downstream can use maybe_get_output
15887 // to retrieve the output
15888 at::native::structured_ge_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
15889 }
15890 const Tensor& maybe_get_output(int64_t output_idx) override {
15891 return *outputs_[output_idx];
15892 }
15893 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
15894};
15895at::Tensor wrapper_CPU_ge_Tensor(const at::Tensor & self, const at::Tensor & other) {
15896structured_ge_Tensor_out_functional op;
15897op.meta(self, other);
15898op.impl(self, other, *op.outputs_[0]);
15899return std::move(op.outputs_[0]).take();
15900}
15901struct structured_ge_Tensor_out_out final : public at::native::structured_ge_Tensor_out {
15902 structured_ge_Tensor_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
15903 void set_output_strided(
15904 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15905 TensorOptions options, DimnameList names
15906 ) override {
15907 const auto& out = outputs_[output_idx].get();
15908 resize_out(out, sizes, strides, options);
15909 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
15910 if (C10_UNLIKELY(maybe_proxy.has_value())) {
15911 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
15912 }
15913 if (!names.empty()) {
15914 namedinference::propagate_names(outputs_[output_idx], names);
15915 }
15916 // super must happen after, so that downstream can use maybe_get_output
15917 // to retrieve the output
15918 at::native::structured_ge_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
15919 }
15920 void set_output_raw_strided(
15921 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15922 TensorOptions options, DimnameList names
15923 ) override {
15924 const auto& out = outputs_[output_idx].get();
15925 resize_out(out, sizes, strides, options);
15926 if (!names.empty()) {
15927 namedinference::propagate_names(outputs_[output_idx], names);
15928 }
15929 // super must happen after, so that downstream can use maybe_get_output
15930 // to retrieve the output
15931 at::native::structured_ge_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
15932 }
15933 const Tensor& maybe_get_output(int64_t output_idx) override {
15934 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
15935 }
15936 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
15937 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
15938};
15939at::Tensor & wrapper_CPU_ge_out_Tensor_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
15940structured_ge_Tensor_out_out op(out);
15941op.meta(self, other);
15942op.impl(self, other, op.maybe_get_output(0));
15943if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
15944return out;
15945}
15946struct structured_ge_Tensor_out_inplace final : public at::native::structured_ge_Tensor_out {
15947 structured_ge_Tensor_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
15948 void set_output_strided(
15949 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15950 TensorOptions options, DimnameList names
15951 ) override {
15952 const auto& out = outputs_[output_idx].get();
15953 check_inplace(out, sizes, options);
15954 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
15955 if (C10_UNLIKELY(maybe_proxy.has_value())) {
15956 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
15957 }
15958 if (!names.empty()) {
15959 namedinference::propagate_names(outputs_[output_idx], names);
15960 }
15961 // super must happen after, so that downstream can use maybe_get_output
15962 // to retrieve the output
15963 at::native::structured_ge_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
15964 }
15965 void set_output_raw_strided(
15966 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15967 TensorOptions options, DimnameList names
15968 ) override {
15969 const auto& out = outputs_[output_idx].get();
15970 check_inplace(out, sizes, options);
15971 if (!names.empty()) {
15972 namedinference::propagate_names(outputs_[output_idx], names);
15973 }
15974 // super must happen after, so that downstream can use maybe_get_output
15975 // to retrieve the output
15976 at::native::structured_ge_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
15977 }
15978 const Tensor& maybe_get_output(int64_t output_idx) override {
15979 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
15980 }
15981 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
15982 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
15983};
15984at::Tensor & wrapper_CPU_ge__Tensor(at::Tensor & self, const at::Tensor & other) {
15985structured_ge_Tensor_out_inplace op(self);
15986op.meta(self, other);
15987op.impl(self, other, op.outputs_[0]);
15988if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
15989return self;
15990}
15991struct structured_le_Scalar_out_functional final : public at::native::structured_le_Scalar_out {
15992 void set_output_strided(
15993 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
15994 TensorOptions options, DimnameList names
15995 ) override {
15996 outputs_[output_idx] = create_out(sizes, strides, options);
15997 if (!names.empty()) {
15998 namedinference::propagate_names(*outputs_[output_idx], names);
15999 }
16000 // super must happen after, so that downstream can use maybe_get_output
16001 // to retrieve the output
16002 at::native::structured_le_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16003 }
16004 void set_output_raw_strided(
16005 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16006 TensorOptions options, DimnameList names
16007 ) override {
16008 outputs_[output_idx] = create_out(sizes, strides, options);
16009 if (!names.empty()) {
16010 namedinference::propagate_names(*outputs_[output_idx], names);
16011 }
16012 // super must happen after, so that downstream can use maybe_get_output
16013 // to retrieve the output
16014 at::native::structured_le_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16015 }
16016 const Tensor& maybe_get_output(int64_t output_idx) override {
16017 return *outputs_[output_idx];
16018 }
16019 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
16020};
16021at::Tensor wrapper_CPU_le_Scalar(const at::Tensor & self, const at::Scalar & other) {
16022structured_le_Scalar_out_functional op;
16023op.meta(self, other);
16024op.impl(self, other, *op.outputs_[0]);
16025return std::move(op.outputs_[0]).take();
16026}
16027struct structured_le_Scalar_out_out final : public at::native::structured_le_Scalar_out {
16028 structured_le_Scalar_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
16029 void set_output_strided(
16030 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16031 TensorOptions options, DimnameList names
16032 ) override {
16033 const auto& out = outputs_[output_idx].get();
16034 resize_out(out, sizes, strides, options);
16035 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
16036 if (C10_UNLIKELY(maybe_proxy.has_value())) {
16037 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
16038 }
16039 if (!names.empty()) {
16040 namedinference::propagate_names(outputs_[output_idx], names);
16041 }
16042 // super must happen after, so that downstream can use maybe_get_output
16043 // to retrieve the output
16044 at::native::structured_le_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16045 }
16046 void set_output_raw_strided(
16047 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16048 TensorOptions options, DimnameList names
16049 ) override {
16050 const auto& out = outputs_[output_idx].get();
16051 resize_out(out, sizes, strides, options);
16052 if (!names.empty()) {
16053 namedinference::propagate_names(outputs_[output_idx], names);
16054 }
16055 // super must happen after, so that downstream can use maybe_get_output
16056 // to retrieve the output
16057 at::native::structured_le_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16058 }
16059 const Tensor& maybe_get_output(int64_t output_idx) override {
16060 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
16061 }
16062 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
16063 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
16064};
16065at::Tensor & wrapper_CPU_le_out_Scalar_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
16066structured_le_Scalar_out_out op(out);
16067op.meta(self, other);
16068op.impl(self, other, op.maybe_get_output(0));
16069if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
16070return out;
16071}
16072struct structured_le_Scalar_out_inplace final : public at::native::structured_le_Scalar_out {
16073 structured_le_Scalar_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
16074 void set_output_strided(
16075 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16076 TensorOptions options, DimnameList names
16077 ) override {
16078 const auto& out = outputs_[output_idx].get();
16079 check_inplace(out, sizes, options);
16080 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
16081 if (C10_UNLIKELY(maybe_proxy.has_value())) {
16082 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
16083 }
16084 if (!names.empty()) {
16085 namedinference::propagate_names(outputs_[output_idx], names);
16086 }
16087 // super must happen after, so that downstream can use maybe_get_output
16088 // to retrieve the output
16089 at::native::structured_le_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16090 }
16091 void set_output_raw_strided(
16092 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16093 TensorOptions options, DimnameList names
16094 ) override {
16095 const auto& out = outputs_[output_idx].get();
16096 check_inplace(out, sizes, options);
16097 if (!names.empty()) {
16098 namedinference::propagate_names(outputs_[output_idx], names);
16099 }
16100 // super must happen after, so that downstream can use maybe_get_output
16101 // to retrieve the output
16102 at::native::structured_le_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16103 }
16104 const Tensor& maybe_get_output(int64_t output_idx) override {
16105 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
16106 }
16107 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
16108 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
16109};
16110at::Tensor & wrapper_CPU_le__Scalar(at::Tensor & self, const at::Scalar & other) {
16111structured_le_Scalar_out_inplace op(self);
16112op.meta(self, other);
16113op.impl(self, other, op.outputs_[0]);
16114if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
16115return self;
16116}
16117struct structured_le_Tensor_out_functional final : public at::native::structured_le_Tensor_out {
16118 void set_output_strided(
16119 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16120 TensorOptions options, DimnameList names
16121 ) override {
16122 outputs_[output_idx] = create_out(sizes, strides, options);
16123 if (!names.empty()) {
16124 namedinference::propagate_names(*outputs_[output_idx], names);
16125 }
16126 // super must happen after, so that downstream can use maybe_get_output
16127 // to retrieve the output
16128 at::native::structured_le_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16129 }
16130 void set_output_raw_strided(
16131 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16132 TensorOptions options, DimnameList names
16133 ) override {
16134 outputs_[output_idx] = create_out(sizes, strides, options);
16135 if (!names.empty()) {
16136 namedinference::propagate_names(*outputs_[output_idx], names);
16137 }
16138 // super must happen after, so that downstream can use maybe_get_output
16139 // to retrieve the output
16140 at::native::structured_le_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16141 }
16142 const Tensor& maybe_get_output(int64_t output_idx) override {
16143 return *outputs_[output_idx];
16144 }
16145 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
16146};
16147at::Tensor wrapper_CPU_le_Tensor(const at::Tensor & self, const at::Tensor & other) {
16148structured_le_Tensor_out_functional op;
16149op.meta(self, other);
16150op.impl(self, other, *op.outputs_[0]);
16151return std::move(op.outputs_[0]).take();
16152}
16153struct structured_le_Tensor_out_out final : public at::native::structured_le_Tensor_out {
16154 structured_le_Tensor_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
16155 void set_output_strided(
16156 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16157 TensorOptions options, DimnameList names
16158 ) override {
16159 const auto& out = outputs_[output_idx].get();
16160 resize_out(out, sizes, strides, options);
16161 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
16162 if (C10_UNLIKELY(maybe_proxy.has_value())) {
16163 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
16164 }
16165 if (!names.empty()) {
16166 namedinference::propagate_names(outputs_[output_idx], names);
16167 }
16168 // super must happen after, so that downstream can use maybe_get_output
16169 // to retrieve the output
16170 at::native::structured_le_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16171 }
16172 void set_output_raw_strided(
16173 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16174 TensorOptions options, DimnameList names
16175 ) override {
16176 const auto& out = outputs_[output_idx].get();
16177 resize_out(out, sizes, strides, options);
16178 if (!names.empty()) {
16179 namedinference::propagate_names(outputs_[output_idx], names);
16180 }
16181 // super must happen after, so that downstream can use maybe_get_output
16182 // to retrieve the output
16183 at::native::structured_le_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16184 }
16185 const Tensor& maybe_get_output(int64_t output_idx) override {
16186 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
16187 }
16188 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
16189 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
16190};
16191at::Tensor & wrapper_CPU_le_out_Tensor_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
16192structured_le_Tensor_out_out op(out);
16193op.meta(self, other);
16194op.impl(self, other, op.maybe_get_output(0));
16195if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
16196return out;
16197}
16198struct structured_le_Tensor_out_inplace final : public at::native::structured_le_Tensor_out {
16199 structured_le_Tensor_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
16200 void set_output_strided(
16201 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16202 TensorOptions options, DimnameList names
16203 ) override {
16204 const auto& out = outputs_[output_idx].get();
16205 check_inplace(out, sizes, options);
16206 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
16207 if (C10_UNLIKELY(maybe_proxy.has_value())) {
16208 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
16209 }
16210 if (!names.empty()) {
16211 namedinference::propagate_names(outputs_[output_idx], names);
16212 }
16213 // super must happen after, so that downstream can use maybe_get_output
16214 // to retrieve the output
16215 at::native::structured_le_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16216 }
16217 void set_output_raw_strided(
16218 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16219 TensorOptions options, DimnameList names
16220 ) override {
16221 const auto& out = outputs_[output_idx].get();
16222 check_inplace(out, sizes, options);
16223 if (!names.empty()) {
16224 namedinference::propagate_names(outputs_[output_idx], names);
16225 }
16226 // super must happen after, so that downstream can use maybe_get_output
16227 // to retrieve the output
16228 at::native::structured_le_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16229 }
16230 const Tensor& maybe_get_output(int64_t output_idx) override {
16231 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
16232 }
16233 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
16234 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
16235};
16236at::Tensor & wrapper_CPU_le__Tensor(at::Tensor & self, const at::Tensor & other) {
16237structured_le_Tensor_out_inplace op(self);
16238op.meta(self, other);
16239op.impl(self, other, op.outputs_[0]);
16240if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
16241return self;
16242}
16243struct structured_gt_Scalar_out_functional final : public at::native::structured_gt_Scalar_out {
16244 void set_output_strided(
16245 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16246 TensorOptions options, DimnameList names
16247 ) override {
16248 outputs_[output_idx] = create_out(sizes, strides, options);
16249 if (!names.empty()) {
16250 namedinference::propagate_names(*outputs_[output_idx], names);
16251 }
16252 // super must happen after, so that downstream can use maybe_get_output
16253 // to retrieve the output
16254 at::native::structured_gt_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16255 }
16256 void set_output_raw_strided(
16257 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16258 TensorOptions options, DimnameList names
16259 ) override {
16260 outputs_[output_idx] = create_out(sizes, strides, options);
16261 if (!names.empty()) {
16262 namedinference::propagate_names(*outputs_[output_idx], names);
16263 }
16264 // super must happen after, so that downstream can use maybe_get_output
16265 // to retrieve the output
16266 at::native::structured_gt_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16267 }
16268 const Tensor& maybe_get_output(int64_t output_idx) override {
16269 return *outputs_[output_idx];
16270 }
16271 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
16272};
16273at::Tensor wrapper_CPU_gt_Scalar(const at::Tensor & self, const at::Scalar & other) {
16274structured_gt_Scalar_out_functional op;
16275op.meta(self, other);
16276op.impl(self, other, *op.outputs_[0]);
16277return std::move(op.outputs_[0]).take();
16278}
16279struct structured_gt_Scalar_out_out final : public at::native::structured_gt_Scalar_out {
16280 structured_gt_Scalar_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
16281 void set_output_strided(
16282 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16283 TensorOptions options, DimnameList names
16284 ) override {
16285 const auto& out = outputs_[output_idx].get();
16286 resize_out(out, sizes, strides, options);
16287 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
16288 if (C10_UNLIKELY(maybe_proxy.has_value())) {
16289 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
16290 }
16291 if (!names.empty()) {
16292 namedinference::propagate_names(outputs_[output_idx], names);
16293 }
16294 // super must happen after, so that downstream can use maybe_get_output
16295 // to retrieve the output
16296 at::native::structured_gt_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16297 }
16298 void set_output_raw_strided(
16299 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16300 TensorOptions options, DimnameList names
16301 ) override {
16302 const auto& out = outputs_[output_idx].get();
16303 resize_out(out, sizes, strides, options);
16304 if (!names.empty()) {
16305 namedinference::propagate_names(outputs_[output_idx], names);
16306 }
16307 // super must happen after, so that downstream can use maybe_get_output
16308 // to retrieve the output
16309 at::native::structured_gt_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16310 }
16311 const Tensor& maybe_get_output(int64_t output_idx) override {
16312 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
16313 }
16314 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
16315 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
16316};
16317at::Tensor & wrapper_CPU_gt_out_Scalar_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
16318structured_gt_Scalar_out_out op(out);
16319op.meta(self, other);
16320op.impl(self, other, op.maybe_get_output(0));
16321if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
16322return out;
16323}
16324struct structured_gt_Scalar_out_inplace final : public at::native::structured_gt_Scalar_out {
16325 structured_gt_Scalar_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
16326 void set_output_strided(
16327 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16328 TensorOptions options, DimnameList names
16329 ) override {
16330 const auto& out = outputs_[output_idx].get();
16331 check_inplace(out, sizes, options);
16332 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
16333 if (C10_UNLIKELY(maybe_proxy.has_value())) {
16334 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
16335 }
16336 if (!names.empty()) {
16337 namedinference::propagate_names(outputs_[output_idx], names);
16338 }
16339 // super must happen after, so that downstream can use maybe_get_output
16340 // to retrieve the output
16341 at::native::structured_gt_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16342 }
16343 void set_output_raw_strided(
16344 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16345 TensorOptions options, DimnameList names
16346 ) override {
16347 const auto& out = outputs_[output_idx].get();
16348 check_inplace(out, sizes, options);
16349 if (!names.empty()) {
16350 namedinference::propagate_names(outputs_[output_idx], names);
16351 }
16352 // super must happen after, so that downstream can use maybe_get_output
16353 // to retrieve the output
16354 at::native::structured_gt_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16355 }
16356 const Tensor& maybe_get_output(int64_t output_idx) override {
16357 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
16358 }
16359 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
16360 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
16361};
16362at::Tensor & wrapper_CPU_gt__Scalar(at::Tensor & self, const at::Scalar & other) {
16363structured_gt_Scalar_out_inplace op(self);
16364op.meta(self, other);
16365op.impl(self, other, op.outputs_[0]);
16366if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
16367return self;
16368}
16369struct structured_gt_Tensor_out_functional final : public at::native::structured_gt_Tensor_out {
16370 void set_output_strided(
16371 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16372 TensorOptions options, DimnameList names
16373 ) override {
16374 outputs_[output_idx] = create_out(sizes, strides, options);
16375 if (!names.empty()) {
16376 namedinference::propagate_names(*outputs_[output_idx], names);
16377 }
16378 // super must happen after, so that downstream can use maybe_get_output
16379 // to retrieve the output
16380 at::native::structured_gt_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16381 }
16382 void set_output_raw_strided(
16383 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16384 TensorOptions options, DimnameList names
16385 ) override {
16386 outputs_[output_idx] = create_out(sizes, strides, options);
16387 if (!names.empty()) {
16388 namedinference::propagate_names(*outputs_[output_idx], names);
16389 }
16390 // super must happen after, so that downstream can use maybe_get_output
16391 // to retrieve the output
16392 at::native::structured_gt_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16393 }
16394 const Tensor& maybe_get_output(int64_t output_idx) override {
16395 return *outputs_[output_idx];
16396 }
16397 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
16398};
16399at::Tensor wrapper_CPU_gt_Tensor(const at::Tensor & self, const at::Tensor & other) {
16400structured_gt_Tensor_out_functional op;
16401op.meta(self, other);
16402op.impl(self, other, *op.outputs_[0]);
16403return std::move(op.outputs_[0]).take();
16404}
16405struct structured_gt_Tensor_out_out final : public at::native::structured_gt_Tensor_out {
16406 structured_gt_Tensor_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
16407 void set_output_strided(
16408 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16409 TensorOptions options, DimnameList names
16410 ) override {
16411 const auto& out = outputs_[output_idx].get();
16412 resize_out(out, sizes, strides, options);
16413 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
16414 if (C10_UNLIKELY(maybe_proxy.has_value())) {
16415 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
16416 }
16417 if (!names.empty()) {
16418 namedinference::propagate_names(outputs_[output_idx], names);
16419 }
16420 // super must happen after, so that downstream can use maybe_get_output
16421 // to retrieve the output
16422 at::native::structured_gt_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16423 }
16424 void set_output_raw_strided(
16425 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16426 TensorOptions options, DimnameList names
16427 ) override {
16428 const auto& out = outputs_[output_idx].get();
16429 resize_out(out, sizes, strides, options);
16430 if (!names.empty()) {
16431 namedinference::propagate_names(outputs_[output_idx], names);
16432 }
16433 // super must happen after, so that downstream can use maybe_get_output
16434 // to retrieve the output
16435 at::native::structured_gt_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16436 }
16437 const Tensor& maybe_get_output(int64_t output_idx) override {
16438 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
16439 }
16440 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
16441 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
16442};
16443at::Tensor & wrapper_CPU_gt_out_Tensor_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
16444structured_gt_Tensor_out_out op(out);
16445op.meta(self, other);
16446op.impl(self, other, op.maybe_get_output(0));
16447if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
16448return out;
16449}
16450struct structured_gt_Tensor_out_inplace final : public at::native::structured_gt_Tensor_out {
16451 structured_gt_Tensor_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
16452 void set_output_strided(
16453 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16454 TensorOptions options, DimnameList names
16455 ) override {
16456 const auto& out = outputs_[output_idx].get();
16457 check_inplace(out, sizes, options);
16458 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
16459 if (C10_UNLIKELY(maybe_proxy.has_value())) {
16460 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
16461 }
16462 if (!names.empty()) {
16463 namedinference::propagate_names(outputs_[output_idx], names);
16464 }
16465 // super must happen after, so that downstream can use maybe_get_output
16466 // to retrieve the output
16467 at::native::structured_gt_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16468 }
16469 void set_output_raw_strided(
16470 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16471 TensorOptions options, DimnameList names
16472 ) override {
16473 const auto& out = outputs_[output_idx].get();
16474 check_inplace(out, sizes, options);
16475 if (!names.empty()) {
16476 namedinference::propagate_names(outputs_[output_idx], names);
16477 }
16478 // super must happen after, so that downstream can use maybe_get_output
16479 // to retrieve the output
16480 at::native::structured_gt_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16481 }
16482 const Tensor& maybe_get_output(int64_t output_idx) override {
16483 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
16484 }
16485 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
16486 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
16487};
16488at::Tensor & wrapper_CPU_gt__Tensor(at::Tensor & self, const at::Tensor & other) {
16489structured_gt_Tensor_out_inplace op(self);
16490op.meta(self, other);
16491op.impl(self, other, op.outputs_[0]);
16492if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
16493return self;
16494}
16495struct structured_lt_Scalar_out_functional final : public at::native::structured_lt_Scalar_out {
16496 void set_output_strided(
16497 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16498 TensorOptions options, DimnameList names
16499 ) override {
16500 outputs_[output_idx] = create_out(sizes, strides, options);
16501 if (!names.empty()) {
16502 namedinference::propagate_names(*outputs_[output_idx], names);
16503 }
16504 // super must happen after, so that downstream can use maybe_get_output
16505 // to retrieve the output
16506 at::native::structured_lt_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16507 }
16508 void set_output_raw_strided(
16509 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16510 TensorOptions options, DimnameList names
16511 ) override {
16512 outputs_[output_idx] = create_out(sizes, strides, options);
16513 if (!names.empty()) {
16514 namedinference::propagate_names(*outputs_[output_idx], names);
16515 }
16516 // super must happen after, so that downstream can use maybe_get_output
16517 // to retrieve the output
16518 at::native::structured_lt_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16519 }
16520 const Tensor& maybe_get_output(int64_t output_idx) override {
16521 return *outputs_[output_idx];
16522 }
16523 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
16524};
16525at::Tensor wrapper_CPU_lt_Scalar(const at::Tensor & self, const at::Scalar & other) {
16526structured_lt_Scalar_out_functional op;
16527op.meta(self, other);
16528op.impl(self, other, *op.outputs_[0]);
16529return std::move(op.outputs_[0]).take();
16530}
16531struct structured_lt_Scalar_out_out final : public at::native::structured_lt_Scalar_out {
16532 structured_lt_Scalar_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
16533 void set_output_strided(
16534 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16535 TensorOptions options, DimnameList names
16536 ) override {
16537 const auto& out = outputs_[output_idx].get();
16538 resize_out(out, sizes, strides, options);
16539 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
16540 if (C10_UNLIKELY(maybe_proxy.has_value())) {
16541 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
16542 }
16543 if (!names.empty()) {
16544 namedinference::propagate_names(outputs_[output_idx], names);
16545 }
16546 // super must happen after, so that downstream can use maybe_get_output
16547 // to retrieve the output
16548 at::native::structured_lt_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16549 }
16550 void set_output_raw_strided(
16551 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16552 TensorOptions options, DimnameList names
16553 ) override {
16554 const auto& out = outputs_[output_idx].get();
16555 resize_out(out, sizes, strides, options);
16556 if (!names.empty()) {
16557 namedinference::propagate_names(outputs_[output_idx], names);
16558 }
16559 // super must happen after, so that downstream can use maybe_get_output
16560 // to retrieve the output
16561 at::native::structured_lt_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16562 }
16563 const Tensor& maybe_get_output(int64_t output_idx) override {
16564 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
16565 }
16566 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
16567 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
16568};
16569at::Tensor & wrapper_CPU_lt_out_Scalar_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
16570structured_lt_Scalar_out_out op(out);
16571op.meta(self, other);
16572op.impl(self, other, op.maybe_get_output(0));
16573if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
16574return out;
16575}
16576struct structured_lt_Scalar_out_inplace final : public at::native::structured_lt_Scalar_out {
16577 structured_lt_Scalar_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
16578 void set_output_strided(
16579 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16580 TensorOptions options, DimnameList names
16581 ) override {
16582 const auto& out = outputs_[output_idx].get();
16583 check_inplace(out, sizes, options);
16584 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
16585 if (C10_UNLIKELY(maybe_proxy.has_value())) {
16586 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
16587 }
16588 if (!names.empty()) {
16589 namedinference::propagate_names(outputs_[output_idx], names);
16590 }
16591 // super must happen after, so that downstream can use maybe_get_output
16592 // to retrieve the output
16593 at::native::structured_lt_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16594 }
16595 void set_output_raw_strided(
16596 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16597 TensorOptions options, DimnameList names
16598 ) override {
16599 const auto& out = outputs_[output_idx].get();
16600 check_inplace(out, sizes, options);
16601 if (!names.empty()) {
16602 namedinference::propagate_names(outputs_[output_idx], names);
16603 }
16604 // super must happen after, so that downstream can use maybe_get_output
16605 // to retrieve the output
16606 at::native::structured_lt_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16607 }
16608 const Tensor& maybe_get_output(int64_t output_idx) override {
16609 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
16610 }
16611 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
16612 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
16613};
16614at::Tensor & wrapper_CPU_lt__Scalar(at::Tensor & self, const at::Scalar & other) {
16615structured_lt_Scalar_out_inplace op(self);
16616op.meta(self, other);
16617op.impl(self, other, op.outputs_[0]);
16618if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
16619return self;
16620}
16621struct structured_lt_Tensor_out_functional final : public at::native::structured_lt_Tensor_out {
16622 void set_output_strided(
16623 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16624 TensorOptions options, DimnameList names
16625 ) override {
16626 outputs_[output_idx] = create_out(sizes, strides, options);
16627 if (!names.empty()) {
16628 namedinference::propagate_names(*outputs_[output_idx], names);
16629 }
16630 // super must happen after, so that downstream can use maybe_get_output
16631 // to retrieve the output
16632 at::native::structured_lt_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16633 }
16634 void set_output_raw_strided(
16635 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16636 TensorOptions options, DimnameList names
16637 ) override {
16638 outputs_[output_idx] = create_out(sizes, strides, options);
16639 if (!names.empty()) {
16640 namedinference::propagate_names(*outputs_[output_idx], names);
16641 }
16642 // super must happen after, so that downstream can use maybe_get_output
16643 // to retrieve the output
16644 at::native::structured_lt_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16645 }
16646 const Tensor& maybe_get_output(int64_t output_idx) override {
16647 return *outputs_[output_idx];
16648 }
16649 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
16650};
16651at::Tensor wrapper_CPU_lt_Tensor(const at::Tensor & self, const at::Tensor & other) {
16652structured_lt_Tensor_out_functional op;
16653op.meta(self, other);
16654op.impl(self, other, *op.outputs_[0]);
16655return std::move(op.outputs_[0]).take();
16656}
16657struct structured_lt_Tensor_out_out final : public at::native::structured_lt_Tensor_out {
16658 structured_lt_Tensor_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
16659 void set_output_strided(
16660 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16661 TensorOptions options, DimnameList names
16662 ) override {
16663 const auto& out = outputs_[output_idx].get();
16664 resize_out(out, sizes, strides, options);
16665 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
16666 if (C10_UNLIKELY(maybe_proxy.has_value())) {
16667 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
16668 }
16669 if (!names.empty()) {
16670 namedinference::propagate_names(outputs_[output_idx], names);
16671 }
16672 // super must happen after, so that downstream can use maybe_get_output
16673 // to retrieve the output
16674 at::native::structured_lt_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16675 }
16676 void set_output_raw_strided(
16677 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16678 TensorOptions options, DimnameList names
16679 ) override {
16680 const auto& out = outputs_[output_idx].get();
16681 resize_out(out, sizes, strides, options);
16682 if (!names.empty()) {
16683 namedinference::propagate_names(outputs_[output_idx], names);
16684 }
16685 // super must happen after, so that downstream can use maybe_get_output
16686 // to retrieve the output
16687 at::native::structured_lt_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16688 }
16689 const Tensor& maybe_get_output(int64_t output_idx) override {
16690 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
16691 }
16692 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
16693 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
16694};
16695at::Tensor & wrapper_CPU_lt_out_Tensor_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
16696structured_lt_Tensor_out_out op(out);
16697op.meta(self, other);
16698op.impl(self, other, op.maybe_get_output(0));
16699if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
16700return out;
16701}
16702struct structured_lt_Tensor_out_inplace final : public at::native::structured_lt_Tensor_out {
16703 structured_lt_Tensor_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
16704 void set_output_strided(
16705 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16706 TensorOptions options, DimnameList names
16707 ) override {
16708 const auto& out = outputs_[output_idx].get();
16709 check_inplace(out, sizes, options);
16710 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
16711 if (C10_UNLIKELY(maybe_proxy.has_value())) {
16712 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
16713 }
16714 if (!names.empty()) {
16715 namedinference::propagate_names(outputs_[output_idx], names);
16716 }
16717 // super must happen after, so that downstream can use maybe_get_output
16718 // to retrieve the output
16719 at::native::structured_lt_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16720 }
16721 void set_output_raw_strided(
16722 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16723 TensorOptions options, DimnameList names
16724 ) override {
16725 const auto& out = outputs_[output_idx].get();
16726 check_inplace(out, sizes, options);
16727 if (!names.empty()) {
16728 namedinference::propagate_names(outputs_[output_idx], names);
16729 }
16730 // super must happen after, so that downstream can use maybe_get_output
16731 // to retrieve the output
16732 at::native::structured_lt_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16733 }
16734 const Tensor& maybe_get_output(int64_t output_idx) override {
16735 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
16736 }
16737 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
16738 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
16739};
16740at::Tensor & wrapper_CPU_lt__Tensor(at::Tensor & self, const at::Tensor & other) {
16741structured_lt_Tensor_out_inplace op(self);
16742op.meta(self, other);
16743op.impl(self, other, op.outputs_[0]);
16744if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
16745return self;
16746}
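// NOTE: the wrappers in the anonymous namespaces below are for non-structured
// kernels and simply forward to the corresponding at::native implementation.
// The "No device check" / "DeviceGuard omitted" comments record that codegen
// emitted neither the per-argument common-device check nor a device guard for
// these CPU entries.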
16747namespace {
16748at::Tensor wrapper_CPU__take(const at::Tensor & self, const at::Tensor & index) {
16749 // No device check
16750 // DeviceGuard omitted
16751 return at::native::take(self, index);
16752}
16753} // anonymous namespace
16754namespace {
16755at::Tensor & wrapper_CPU_out_take_out(const at::Tensor & self, const at::Tensor & index, at::Tensor & out) {
16756 // No device check
16757 // DeviceGuard omitted
16758 return at::native::take_out(self, index, out);
16759}
16760} // anonymous namespace
16761namespace {
16762at::Tensor wrapper_CPU__index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index) {
16763 // No device check
16764 // DeviceGuard omitted
16765 return at::native::index_select_cpu_(self, dim, index);
16766}
16767} // anonymous namespace
16768namespace {
16769at::Tensor & wrapper_CPU_out_index_select_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out) {
16770 // No device check
16771 // DeviceGuard omitted
16772 return at::native::index_select_out_cpu_(self, dim, index, out);
16773}
16774} // anonymous namespace
16775namespace {
16776at::Tensor wrapper_CPU__masked_select(const at::Tensor & self, const at::Tensor & mask) {
16777 // No device check
16778 // DeviceGuard omitted
16779 return at::native::masked_select_cpu(self, mask);
16780}
16781} // anonymous namespace
16782namespace {
16783at::Tensor & wrapper_CPU_out_masked_select_out(const at::Tensor & self, const at::Tensor & mask, at::Tensor & out) {
16784 // No device check
16785 // DeviceGuard omitted
16786 return at::native::masked_select_out_cpu(self, mask, out);
16787}
16788} // anonymous namespace
16789namespace {
16790at::Tensor wrapper_CPU__nonzero(const at::Tensor & self) {
16791 // No device check
16792 // DeviceGuard omitted
16793 return at::native::nonzero_cpu(self);
16794}
16795} // anonymous namespace
16796namespace {
16797at::Tensor & wrapper_CPU_out_nonzero_out(const at::Tensor & self, at::Tensor & out) {
16798 // No device check
16799 // DeviceGuard omitted
16800 return at::native::nonzero_out_cpu(self, out);
16801}
16802} // anonymous namespace
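// NOTE: for some structured kernels (gather, triangular_solve and lu_unpack below)
// the generated set_output_* methods end at the "super must happen after" comment
// without a call to the parent's set_output_raw_strided(): only the
// TensorIterator-backed kernels in this file have a base override worth forwarding
// to, so the call is omitted for these ops while the comment remains.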
16803struct structured_gather_out_functional final : public at::native::structured_gather_out {
16804 void set_output_strided(
16805 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16806 TensorOptions options, DimnameList names
16807 ) override {
16808 outputs_[output_idx] = create_out(sizes, strides, options);
16809 if (!names.empty()) {
16810 namedinference::propagate_names(*outputs_[output_idx], names);
16811 }
16812 // super must happen after, so that downstream can use maybe_get_output
16813 // to retrieve the output
16814 }
16815 void set_output_raw_strided(
16816 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16817 TensorOptions options, DimnameList names
16818 ) override {
16819 outputs_[output_idx] = create_out(sizes, strides, options);
16820 if (!names.empty()) {
16821 namedinference::propagate_names(*outputs_[output_idx], names);
16822 }
16823 // super must happen after, so that downstream can use maybe_get_output
16824 // to retrieve the output
16825 }
16826 const Tensor& maybe_get_output(int64_t output_idx) override {
16827 return *outputs_[output_idx];
16828 }
16829 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
16830};
16831at::Tensor wrapper_CPU_gather(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
16832structured_gather_out_functional op;
16833op.meta(self, dim, index, sparse_grad);
16834op.impl(self, dim, index, sparse_grad, *op.outputs_[0]);
16835return std::move(op.outputs_[0]).take();
16836}
16837struct structured_gather_out_out final : public at::native::structured_gather_out {
16838 structured_gather_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
16839 void set_output_strided(
16840 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16841 TensorOptions options, DimnameList names
16842 ) override {
16843 const auto& out = outputs_[output_idx].get();
16844 resize_out(out, sizes, strides, options);
16845 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
16846 if (C10_UNLIKELY(maybe_proxy.has_value())) {
16847 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
16848 }
16849 if (!names.empty()) {
16850 namedinference::propagate_names(outputs_[output_idx], names);
16851 }
16852 // super must happen after, so that downstream can use maybe_get_output
16853 // to retrieve the output
16854 }
16855 void set_output_raw_strided(
16856 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16857 TensorOptions options, DimnameList names
16858 ) override {
16859 const auto& out = outputs_[output_idx].get();
16860 resize_out(out, sizes, strides, options);
16861 if (!names.empty()) {
16862 namedinference::propagate_names(outputs_[output_idx], names);
16863 }
16864 // super must happen after, so that downstream can use maybe_get_output
16865 // to retrieve the output
16866 }
16867 const Tensor& maybe_get_output(int64_t output_idx) override {
16868 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
16869 }
16870 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
16871 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
16872};
16873at::Tensor & wrapper_CPU_gather_out_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) {
16874structured_gather_out_out op(out);
16875op.meta(self, dim, index, sparse_grad);
16876op.impl(self, dim, index, sparse_grad, op.maybe_get_output(0));
16877if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
16878return out;
16879}
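// Illustrative only (not generated code): once registered with the dispatcher,
// the gather wrappers above back the usual public calls on CPU tensors, e.g.
//   auto r = at::gather(self, /*dim=*/0, index);   // -> wrapper_CPU_gather
//   at::gather_out(out, self, /*dim=*/0, index);   // -> wrapper_CPU_gather_out_out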
16880struct structured_addcmul_out_functional final : public at::native::structured_addcmul_out {
16881 void set_output_strided(
16882 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16883 TensorOptions options, DimnameList names
16884 ) override {
16885 outputs_[output_idx] = create_out(sizes, strides, options);
16886 if (!names.empty()) {
16887 namedinference::propagate_names(*outputs_[output_idx], names);
16888 }
16889 // super must happen after, so that downstream can use maybe_get_output
16890 // to retrieve the output
16891 at::native::structured_addcmul_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16892 }
16893 void set_output_raw_strided(
16894 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16895 TensorOptions options, DimnameList names
16896 ) override {
16897 outputs_[output_idx] = create_out(sizes, strides, options);
16898 if (!names.empty()) {
16899 namedinference::propagate_names(*outputs_[output_idx], names);
16900 }
16901 // super must happen after, so that downstream can use maybe_get_output
16902 // to retrieve the output
16903 at::native::structured_addcmul_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16904 }
16905 const Tensor& maybe_get_output(int64_t output_idx) override {
16906 return *outputs_[output_idx];
16907 }
16908 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
16909};
16910at::Tensor wrapper_CPU_addcmul(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
16911structured_addcmul_out_functional op;
16912op.meta(self, tensor1, tensor2, value);
16913op.impl(self, tensor1, tensor2, value, *op.outputs_[0]);
16914return std::move(op.outputs_[0]).take();
16915}
16916struct structured_addcmul_out_out final : public at::native::structured_addcmul_out {
16917 structured_addcmul_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
16918 void set_output_strided(
16919 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16920 TensorOptions options, DimnameList names
16921 ) override {
16922 const auto& out = outputs_[output_idx].get();
16923 resize_out(out, sizes, strides, options);
16924 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
16925 if (C10_UNLIKELY(maybe_proxy.has_value())) {
16926 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
16927 }
16928 if (!names.empty()) {
16929 namedinference::propagate_names(outputs_[output_idx], names);
16930 }
16931 // super must happen after, so that downstream can use maybe_get_output
16932 // to retrieve the output
16933 at::native::structured_addcmul_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16934 }
16935 void set_output_raw_strided(
16936 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16937 TensorOptions options, DimnameList names
16938 ) override {
16939 const auto& out = outputs_[output_idx].get();
16940 resize_out(out, sizes, strides, options);
16941 if (!names.empty()) {
16942 namedinference::propagate_names(outputs_[output_idx], names);
16943 }
16944 // super must happen after, so that downstream can use maybe_get_output
16945 // to retrieve the output
16946 at::native::structured_addcmul_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16947 }
16948 const Tensor& maybe_get_output(int64_t output_idx) override {
16949 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
16950 }
16951 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
16952 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
16953};
16954at::Tensor & wrapper_CPU_addcmul_out_out(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out) {
16955structured_addcmul_out_out op(out);
16956op.meta(self, tensor1, tensor2, value);
16957op.impl(self, tensor1, tensor2, value, op.maybe_get_output(0));
16958if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
16959return out;
16960}
16961struct structured_addcmul_out_inplace final : public at::native::structured_addcmul_out {
16962 structured_addcmul_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
16963 void set_output_strided(
16964 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16965 TensorOptions options, DimnameList names
16966 ) override {
16967 const auto& out = outputs_[output_idx].get();
16968 check_inplace(out, sizes, options);
16969 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
16970 if (C10_UNLIKELY(maybe_proxy.has_value())) {
16971 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
16972 }
16973 if (!names.empty()) {
16974 namedinference::propagate_names(outputs_[output_idx], names);
16975 }
16976 // super must happen after, so that downstream can use maybe_get_output
16977 // to retrieve the output
16978 at::native::structured_addcmul_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16979 }
16980 void set_output_raw_strided(
16981 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
16982 TensorOptions options, DimnameList names
16983 ) override {
16984 const auto& out = outputs_[output_idx].get();
16985 check_inplace(out, sizes, options);
16986 if (!names.empty()) {
16987 namedinference::propagate_names(outputs_[output_idx], names);
16988 }
16989 // super must happen after, so that downstream can use maybe_get_output
16990 // to retrieve the output
16991 at::native::structured_addcmul_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
16992 }
16993 const Tensor& maybe_get_output(int64_t output_idx) override {
16994 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
16995 }
16996 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
16997 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
16998};
16999at::Tensor & wrapper_CPU_addcmul_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
17000structured_addcmul_out_inplace op(self);
17001op.meta(self, tensor1, tensor2, value);
17002op.impl(self, tensor1, tensor2, value, op.outputs_[0]);
17003if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
17004return self;
17005}
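// Illustrative only: self.addcmul_(tensor1, tensor2, value) on a CPU tensor resolves
// to wrapper_CPU_addcmul_ above. Because check_inplace() requires self to already have
// the result's sizes and options, a broadcast that would enlarge self is rejected
// rather than resized.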
17006struct structured_addcdiv_out_functional final : public at::native::structured_addcdiv_out {
17007 void set_output_strided(
17008 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17009 TensorOptions options, DimnameList names
17010 ) override {
17011 outputs_[output_idx] = create_out(sizes, strides, options);
17012 if (!names.empty()) {
17013 namedinference::propagate_names(*outputs_[output_idx], names);
17014 }
17015 // super must happen after, so that downstream can use maybe_get_output
17016 // to retrieve the output
17017 at::native::structured_addcdiv_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17018 }
17019 void set_output_raw_strided(
17020 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17021 TensorOptions options, DimnameList names
17022 ) override {
17023 outputs_[output_idx] = create_out(sizes, strides, options);
17024 if (!names.empty()) {
17025 namedinference::propagate_names(*outputs_[output_idx], names);
17026 }
17027 // super must happen after, so that downstream can use maybe_get_output
17028 // to retrieve the output
17029 at::native::structured_addcdiv_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17030 }
17031 const Tensor& maybe_get_output(int64_t output_idx) override {
17032 return *outputs_[output_idx];
17033 }
17034 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
17035};
17036at::Tensor wrapper_CPU_addcdiv(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
17037structured_addcdiv_out_functional op;
17038op.meta(self, tensor1, tensor2, value);
17039op.impl(self, tensor1, tensor2, value, *op.outputs_[0]);
17040return std::move(op.outputs_[0]).take();
17041}
17042struct structured_addcdiv_out_out final : public at::native::structured_addcdiv_out {
17043 structured_addcdiv_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
17044 void set_output_strided(
17045 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17046 TensorOptions options, DimnameList names
17047 ) override {
17048 const auto& out = outputs_[output_idx].get();
17049 resize_out(out, sizes, strides, options);
17050 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
17051 if (C10_UNLIKELY(maybe_proxy.has_value())) {
17052 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
17053 }
17054 if (!names.empty()) {
17055 namedinference::propagate_names(outputs_[output_idx], names);
17056 }
17057 // super must happen after, so that downstream can use maybe_get_output
17058 // to retrieve the output
17059 at::native::structured_addcdiv_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17060 }
17061 void set_output_raw_strided(
17062 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17063 TensorOptions options, DimnameList names
17064 ) override {
17065 const auto& out = outputs_[output_idx].get();
17066 resize_out(out, sizes, strides, options);
17067 if (!names.empty()) {
17068 namedinference::propagate_names(outputs_[output_idx], names);
17069 }
17070 // super must happen after, so that downstream can use maybe_get_output
17071 // to retrieve the output
17072 at::native::structured_addcdiv_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17073 }
17074 const Tensor& maybe_get_output(int64_t output_idx) override {
17075 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
17076 }
17077 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
17078 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
17079};
17080at::Tensor & wrapper_CPU_addcdiv_out_out(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out) {
17081structured_addcdiv_out_out op(out);
17082op.meta(self, tensor1, tensor2, value);
17083op.impl(self, tensor1, tensor2, value, op.maybe_get_output(0));
17084if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
17085return out;
17086}
17087struct structured_addcdiv_out_inplace final : public at::native::structured_addcdiv_out {
17088 structured_addcdiv_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
17089 void set_output_strided(
17090 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17091 TensorOptions options, DimnameList names
17092 ) override {
17093 const auto& out = outputs_[output_idx].get();
17094 check_inplace(out, sizes, options);
17095 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
17096 if (C10_UNLIKELY(maybe_proxy.has_value())) {
17097 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
17098 }
17099 if (!names.empty()) {
17100 namedinference::propagate_names(outputs_[output_idx], names);
17101 }
17102 // super must happen after, so that downstream can use maybe_get_output
17103 // to retrieve the output
17104 at::native::structured_addcdiv_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17105 }
17106 void set_output_raw_strided(
17107 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17108 TensorOptions options, DimnameList names
17109 ) override {
17110 const auto& out = outputs_[output_idx].get();
17111 check_inplace(out, sizes, options);
17112 if (!names.empty()) {
17113 namedinference::propagate_names(outputs_[output_idx], names);
17114 }
17115 // super must happen after, so that downstream can use maybe_get_output
17116 // to retrieve the output
17117 at::native::structured_addcdiv_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17118 }
17119 const Tensor& maybe_get_output(int64_t output_idx) override {
17120 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
17121 }
17122 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
17123 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
17124};
17125at::Tensor & wrapper_CPU_addcdiv_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
17126structured_addcdiv_out_inplace op(self);
17127op.meta(self, tensor1, tensor2, value);
17128op.impl(self, tensor1, tensor2, value, op.outputs_[0]);
17129if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
17130return self;
17131}
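// NOTE: the next two structured groups (triangular_solve, lu_unpack) are
// multi-output: outputs_ / proxy_outputs_ grow to std::array<..., 2> and
// std::array<..., 3>, each proxy slot is copied back individually, and the
// _out wrappers return the caller's tensors via std::forward_as_tuple.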
17132struct structured_triangular_solve_out_functional final : public at::native::structured_triangular_solve_out {
17133 void set_output_strided(
17134 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17135 TensorOptions options, DimnameList names
17136 ) override {
17137 outputs_[output_idx] = create_out(sizes, strides, options);
17138 if (!names.empty()) {
17139 namedinference::propagate_names(*outputs_[output_idx], names);
17140 }
17141 // super must happen after, so that downstream can use maybe_get_output
17142 // to retrieve the output
17143 }
17144 void set_output_raw_strided(
17145 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17146 TensorOptions options, DimnameList names
17147 ) override {
17148 outputs_[output_idx] = create_out(sizes, strides, options);
17149 if (!names.empty()) {
17150 namedinference::propagate_names(*outputs_[output_idx], names);
17151 }
17152 // super must happen after, so that downstream can use maybe_get_output
17153 // to retrieve the output
17154 }
17155 const Tensor& maybe_get_output(int64_t output_idx) override {
17156 return *outputs_[output_idx];
17157 }
17158 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
17159};
17160::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_triangular_solve(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular) {
17161structured_triangular_solve_out_functional op;
17162op.meta(self, A, upper, transpose, unitriangular);
17163op.impl(self, A, upper, transpose, unitriangular, *op.outputs_[0], *op.outputs_[1]);
17164return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
17165}
17166struct structured_triangular_solve_out_out final : public at::native::structured_triangular_solve_out {
17167 structured_triangular_solve_out_out(Tensor& out0, Tensor& out1) : outputs_{ std::ref(out0), std::ref(out1) } {}
17168 void set_output_strided(
17169 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17170 TensorOptions options, DimnameList names
17171 ) override {
17172 const auto& out = outputs_[output_idx].get();
17173 resize_out(out, sizes, strides, options);
17174 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
17175 if (C10_UNLIKELY(maybe_proxy.has_value())) {
17176 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
17177 }
17178 if (!names.empty()) {
17179 namedinference::propagate_names(outputs_[output_idx], names);
17180 }
17181 // super must happen after, so that downstream can use maybe_get_output
17182 // to retrieve the output
17183 }
17184 void set_output_raw_strided(
17185 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17186 TensorOptions options, DimnameList names
17187 ) override {
17188 const auto& out = outputs_[output_idx].get();
17189 resize_out(out, sizes, strides, options);
17190 if (!names.empty()) {
17191 namedinference::propagate_names(outputs_[output_idx], names);
17192 }
17193 // super must happen after, so that downstream can use maybe_get_output
17194 // to retrieve the output
17195 }
17196 const Tensor& maybe_get_output(int64_t output_idx) override {
17197 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
17198 }
17199 std::array<std::reference_wrapper<Tensor>, 2> outputs_;
17200 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 2> proxy_outputs_;
17201};
17202::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_triangular_solve_out_X(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular, at::Tensor & X, at::Tensor & M) {
17203structured_triangular_solve_out_out op(X, M);
17204op.meta(self, A, upper, transpose, unitriangular);
17205op.impl(self, A, upper, transpose, unitriangular, op.maybe_get_output(0), op.maybe_get_output(1));
17206if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
17207if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(**op.proxy_outputs_[1]);
17208return std::forward_as_tuple(X, M);
17209}
17210namespace {
17211at::Tensor wrapper_CPU__linalg_solve_triangular(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular) {
17212 // No device check
17213 // DeviceGuard omitted
17214 return at::native::linalg_solve_triangular(self, B, upper, left, unitriangular);
17215}
17216} // anonymous namespace
17217namespace {
17218at::Tensor & wrapper_CPU_out_linalg_solve_triangular_out(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular, at::Tensor & out) {
17219 // No device check
17220 // DeviceGuard omitted
17221 return at::native::linalg_solve_triangular_out(self, B, upper, left, unitriangular, out);
17222}
17223} // anonymous namespace
17224namespace {
17225at::Tensor wrapper_CPU__cholesky(const at::Tensor & self, bool upper) {
17226 // No device check
17227 // DeviceGuard omitted
17228 return at::native::cholesky(self, upper);
17229}
17230} // anonymous namespace
17231namespace {
17232at::Tensor & wrapper_CPU_out_cholesky_out(const at::Tensor & self, bool upper, at::Tensor & out) {
17233 // No device check
17234 // DeviceGuard omitted
17235 return at::native::cholesky_out(self, upper, out);
17236}
17237} // anonymous namespace
17238namespace {
17239at::Tensor wrapper_CPU___cholesky_solve_helper(const at::Tensor & self, const at::Tensor & A, bool upper) {
17240 // No device check
17241 // DeviceGuard omitted
17242 return at::native::_cholesky_solve_helper_cpu(self, A, upper);
17243}
17244} // anonymous namespace
17245namespace {
17246at::Tensor wrapper_CPU__cholesky_inverse(const at::Tensor & self, bool upper) {
17247 // No device check
17248 // DeviceGuard omitted
17249 return at::native::cholesky_inverse(self, upper);
17250}
17251} // anonymous namespace
17252namespace {
17253at::Tensor & wrapper_CPU_out_cholesky_inverse_out(const at::Tensor & self, bool upper, at::Tensor & out) {
17254 // No device check
17255 // DeviceGuard omitted
17256 return at::native::cholesky_inverse_out(self, upper, out);
17257}
17258} // anonymous namespace
17259namespace {
17260::std::tuple<at::Tensor,at::Tensor> wrapper_CPU__geqrf(const at::Tensor & self) {
17261 // No device check
17262 // DeviceGuard omitted
17263 return at::native::geqrf(self);
17264}
17265} // anonymous namespace
17266namespace {
17267::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_a_geqrf_out(const at::Tensor & self, at::Tensor & a, at::Tensor & tau) {
17268 // No device check
17269 // DeviceGuard omitted
17270 return at::native::geqrf_out(self, a, tau);
17271}
17272} // anonymous namespace
17273namespace {
17274at::Tensor wrapper_CPU__ormqr(const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose) {
17275 // No device check
17276 // DeviceGuard omitted
17277 return at::native::ormqr(self, input2, input3, left, transpose);
17278}
17279} // anonymous namespace
17280namespace {
17281at::Tensor & wrapper_CPU_out_ormqr_out(const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose, at::Tensor & out) {
17282 // No device check
17283 // DeviceGuard omitted
17284 return at::native::ormqr_out(self, input2, input3, left, transpose, out);
17285}
17286} // anonymous namespace
17287struct structured_lu_unpack_out_functional final : public at::native::structured_lu_unpack_out {
17288 void set_output_strided(
17289 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17290 TensorOptions options, DimnameList names
17291 ) override {
17292 outputs_[output_idx] = create_out(sizes, strides, options);
17293 if (!names.empty()) {
17294 namedinference::propagate_names(*outputs_[output_idx], names);
17295 }
17296 // super must happen after, so that downstream can use maybe_get_output
17297 // to retrieve the output
17298 }
17299 void set_output_raw_strided(
17300 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17301 TensorOptions options, DimnameList names
17302 ) override {
17303 outputs_[output_idx] = create_out(sizes, strides, options);
17304 if (!names.empty()) {
17305 namedinference::propagate_names(*outputs_[output_idx], names);
17306 }
17307 // super must happen after, so that downstream can use maybe_get_output
17308 // to retrieve the output
17309 }
17310 const Tensor& maybe_get_output(int64_t output_idx) override {
17311 return *outputs_[output_idx];
17312 }
17313 std::array<c10::ExclusivelyOwned<Tensor>, 3> outputs_;
17314};
17315::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU_lu_unpack(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots) {
17316structured_lu_unpack_out_functional op;
17317op.meta(LU_data, LU_pivots, unpack_data, unpack_pivots);
17318op.impl(LU_data, LU_pivots, unpack_data, unpack_pivots, *op.outputs_[0], *op.outputs_[1], *op.outputs_[2]);
17319return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take(), std::move(op.outputs_[2]).take());
17320}
17321struct structured_lu_unpack_out_out final : public at::native::structured_lu_unpack_out {
17322 structured_lu_unpack_out_out(Tensor& out0, Tensor& out1, Tensor& out2) : outputs_{ std::ref(out0), std::ref(out1), std::ref(out2) } {}
17323 void set_output_strided(
17324 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17325 TensorOptions options, DimnameList names
17326 ) override {
17327 const auto& out = outputs_[output_idx].get();
17328 resize_out(out, sizes, strides, options);
17329 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
17330 if (C10_UNLIKELY(maybe_proxy.has_value())) {
17331 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
17332 }
17333 if (!names.empty()) {
17334 namedinference::propagate_names(outputs_[output_idx], names);
17335 }
17336 // super must happen after, so that downstream can use maybe_get_output
17337 // to retrieve the output
17338 }
17339 void set_output_raw_strided(
17340 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17341 TensorOptions options, DimnameList names
17342 ) override {
17343 const auto& out = outputs_[output_idx].get();
17344 resize_out(out, sizes, strides, options);
17345 if (!names.empty()) {
17346 namedinference::propagate_names(outputs_[output_idx], names);
17347 }
17348 // super must happen after, so that downstream can use maybe_get_output
17349 // to retrieve the output
17350 }
17351 const Tensor& maybe_get_output(int64_t output_idx) override {
17352 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
17353 }
17354 std::array<std::reference_wrapper<Tensor>, 3> outputs_;
17355 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 3> proxy_outputs_;
17356};
17357::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CPU_lu_unpack_out_out(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots, at::Tensor & P, at::Tensor & L, at::Tensor & U) {
17358structured_lu_unpack_out_out op(P, L, U);
17359op.meta(LU_data, LU_pivots, unpack_data, unpack_pivots);
17360op.impl(LU_data, LU_pivots, unpack_data, unpack_pivots, op.maybe_get_output(0), op.maybe_get_output(1), op.maybe_get_output(2));
17361if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
17362if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(**op.proxy_outputs_[1]);
17363if (op.proxy_outputs_[2].has_value()) op.outputs_[2].get().copy_(**op.proxy_outputs_[2]);
17364return std::forward_as_tuple(P, L, U);
17365}
17366namespace {
17367at::Tensor wrapper_CPU__multinomial(const at::Tensor & self, int64_t num_samples, bool replacement, c10::optional<at::Generator> generator) {
17368 // No device check
17369 // DeviceGuard omitted
17370 return at::native::multinomial(self, num_samples, replacement, generator);
17371}
17372} // anonymous namespace
17373namespace {
17374at::Tensor & wrapper_CPU_out_multinomial_out(const at::Tensor & self, int64_t num_samples, bool replacement, c10::optional<at::Generator> generator, at::Tensor & out) {
17375 // No device check
17376 // DeviceGuard omitted
17377 return at::native::multinomial_out(self, num_samples, replacement, generator, out);
17378}
17379} // anonymous namespace
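// NOTE: lgamma, polygamma and erfinv below follow the same single-output,
// TensorIterator-backed pattern as the comparison ops earlier in this file;
// polygamma is the only one whose meta()/impl() take the integer order n ahead
// of the tensor argument.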
17380struct structured_lgamma_out_functional final : public at::native::structured_lgamma_out {
17381 void set_output_strided(
17382 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17383 TensorOptions options, DimnameList names
17384 ) override {
17385 outputs_[output_idx] = create_out(sizes, strides, options);
17386 if (!names.empty()) {
17387 namedinference::propagate_names(*outputs_[output_idx], names);
17388 }
17389 // super must happen after, so that downstream can use maybe_get_output
17390 // to retrieve the output
17391 at::native::structured_lgamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17392 }
17393 void set_output_raw_strided(
17394 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17395 TensorOptions options, DimnameList names
17396 ) override {
17397 outputs_[output_idx] = create_out(sizes, strides, options);
17398 if (!names.empty()) {
17399 namedinference::propagate_names(*outputs_[output_idx], names);
17400 }
17401 // super must happen after, so that downstream can use maybe_get_output
17402 // to retrieve the output
17403 at::native::structured_lgamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17404 }
17405 const Tensor& maybe_get_output(int64_t output_idx) override {
17406 return *outputs_[output_idx];
17407 }
17408 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
17409};
17410at::Tensor wrapper_CPU_lgamma(const at::Tensor & self) {
17411structured_lgamma_out_functional op;
17412op.meta(self);
17413op.impl(self, *op.outputs_[0]);
17414return std::move(op.outputs_[0]).take();
17415}
17416struct structured_lgamma_out_out final : public at::native::structured_lgamma_out {
17417 structured_lgamma_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
17418 void set_output_strided(
17419 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17420 TensorOptions options, DimnameList names
17421 ) override {
17422 const auto& out = outputs_[output_idx].get();
17423 resize_out(out, sizes, strides, options);
17424 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
17425 if (C10_UNLIKELY(maybe_proxy.has_value())) {
17426 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
17427 }
17428 if (!names.empty()) {
17429 namedinference::propagate_names(outputs_[output_idx], names);
17430 }
17431 // super must happen after, so that downstream can use maybe_get_output
17432 // to retrieve the output
17433 at::native::structured_lgamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17434 }
17435 void set_output_raw_strided(
17436 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17437 TensorOptions options, DimnameList names
17438 ) override {
17439 const auto& out = outputs_[output_idx].get();
17440 resize_out(out, sizes, strides, options);
17441 if (!names.empty()) {
17442 namedinference::propagate_names(outputs_[output_idx], names);
17443 }
17444 // super must happen after, so that downstream can use maybe_get_output
17445 // to retrieve the output
17446 at::native::structured_lgamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17447 }
17448 const Tensor& maybe_get_output(int64_t output_idx) override {
17449 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
17450 }
17451 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
17452 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
17453};
17454at::Tensor & wrapper_CPU_lgamma_out_out(const at::Tensor & self, at::Tensor & out) {
17455structured_lgamma_out_out op(out);
17456op.meta(self);
17457op.impl(self, op.maybe_get_output(0));
17458if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
17459return out;
17460}
17461struct structured_lgamma_out_inplace final : public at::native::structured_lgamma_out {
17462 structured_lgamma_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
17463 void set_output_strided(
17464 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17465 TensorOptions options, DimnameList names
17466 ) override {
17467 const auto& out = outputs_[output_idx].get();
17468 check_inplace(out, sizes, options);
17469 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
17470 if (C10_UNLIKELY(maybe_proxy.has_value())) {
17471 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
17472 }
17473 if (!names.empty()) {
17474 namedinference::propagate_names(outputs_[output_idx], names);
17475 }
17476 // super must happen after, so that downstream can use maybe_get_output
17477 // to retrieve the output
17478 at::native::structured_lgamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17479 }
17480 void set_output_raw_strided(
17481 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17482 TensorOptions options, DimnameList names
17483 ) override {
17484 const auto& out = outputs_[output_idx].get();
17485 check_inplace(out, sizes, options);
17486 if (!names.empty()) {
17487 namedinference::propagate_names(outputs_[output_idx], names);
17488 }
17489 // super must happen after, so that downstream can use maybe_get_output
17490 // to retrieve the output
17491 at::native::structured_lgamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17492 }
17493 const Tensor& maybe_get_output(int64_t output_idx) override {
17494 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
17495 }
17496 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
17497 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
17498};
17499at::Tensor & wrapper_CPU_lgamma_(at::Tensor & self) {
17500structured_lgamma_out_inplace op(self);
17501op.meta(self);
17502op.impl(self, op.outputs_[0]);
17503if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
17504return self;
17505}
17506struct structured_polygamma_out_functional final : public at::native::structured_polygamma_out {
17507 void set_output_strided(
17508 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17509 TensorOptions options, DimnameList names
17510 ) override {
17511 outputs_[output_idx] = create_out(sizes, strides, options);
17512 if (!names.empty()) {
17513 namedinference::propagate_names(*outputs_[output_idx], names);
17514 }
17515 // super must happen after, so that downstream can use maybe_get_output
17516 // to retrieve the output
17517 at::native::structured_polygamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17518 }
17519 void set_output_raw_strided(
17520 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17521 TensorOptions options, DimnameList names
17522 ) override {
17523 outputs_[output_idx] = create_out(sizes, strides, options);
17524 if (!names.empty()) {
17525 namedinference::propagate_names(*outputs_[output_idx], names);
17526 }
17527 // super must happen after, so that downstream can use maybe_get_output
17528 // to retrieve the output
17529 at::native::structured_polygamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17530 }
17531 const Tensor& maybe_get_output(int64_t output_idx) override {
17532 return *outputs_[output_idx];
17533 }
17534 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
17535};
17536at::Tensor wrapper_CPU_polygamma(int64_t n, const at::Tensor & self) {
17537structured_polygamma_out_functional op;
17538op.meta(n, self);
17539op.impl(n, self, *op.outputs_[0]);
17540return std::move(op.outputs_[0]).take();
17541}
17542struct structured_polygamma_out_out final : public at::native::structured_polygamma_out {
17543 structured_polygamma_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
17544 void set_output_strided(
17545 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17546 TensorOptions options, DimnameList names
17547 ) override {
17548 const auto& out = outputs_[output_idx].get();
17549 resize_out(out, sizes, strides, options);
17550 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
17551 if (C10_UNLIKELY(maybe_proxy.has_value())) {
17552 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
17553 }
17554 if (!names.empty()) {
17555 namedinference::propagate_names(outputs_[output_idx], names);
17556 }
17557 // super must happen after, so that downstream can use maybe_get_output
17558 // to retrieve the output
17559 at::native::structured_polygamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17560 }
17561 void set_output_raw_strided(
17562 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17563 TensorOptions options, DimnameList names
17564 ) override {
17565 const auto& out = outputs_[output_idx].get();
17566 resize_out(out, sizes, strides, options);
17567 if (!names.empty()) {
17568 namedinference::propagate_names(outputs_[output_idx], names);
17569 }
17570 // super must happen after, so that downstream can use maybe_get_output
17571 // to retrieve the output
17572 at::native::structured_polygamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17573 }
17574 const Tensor& maybe_get_output(int64_t output_idx) override {
17575 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
17576 }
17577 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
17578 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
17579};
17580at::Tensor & wrapper_CPU_polygamma_out_out(int64_t n, const at::Tensor & self, at::Tensor & out) {
17581structured_polygamma_out_out op(out);
17582op.meta(n, self);
17583op.impl(n, self, op.maybe_get_output(0));
17584if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
17585return out;
17586}
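// erfinv follows the same functional / out= pattern and additionally has an
// in-place variant (erfinv_), generated below as structured_erfinv_out_inplace.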
17587struct structured_erfinv_out_functional final : public at::native::structured_erfinv_out {
17588 void set_output_strided(
17589 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17590 TensorOptions options, DimnameList names
17591 ) override {
17592 outputs_[output_idx] = create_out(sizes, strides, options);
17593 if (!names.empty()) {
17594 namedinference::propagate_names(*outputs_[output_idx], names);
17595 }
17596 // super must happen after, so that downstream can use maybe_get_output
17597 // to retrieve the output
17598 at::native::structured_erfinv_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17599 }
17600 void set_output_raw_strided(
17601 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17602 TensorOptions options, DimnameList names
17603 ) override {
17604 outputs_[output_idx] = create_out(sizes, strides, options);
17605 if (!names.empty()) {
17606 namedinference::propagate_names(*outputs_[output_idx], names);
17607 }
17608 // super must happen after, so that downstream can use maybe_get_output
17609 // to retrieve the output
17610 at::native::structured_erfinv_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17611 }
17612 const Tensor& maybe_get_output(int64_t output_idx) override {
17613 return *outputs_[output_idx];
17614 }
17615 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
17616};
17617at::Tensor wrapper_CPU_erfinv(const at::Tensor & self) {
17618structured_erfinv_out_functional op;
17619op.meta(self);
17620op.impl(self, *op.outputs_[0]);
17621return std::move(op.outputs_[0]).take();
17622}
17623struct structured_erfinv_out_out final : public at::native::structured_erfinv_out {
17624 structured_erfinv_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
17625 void set_output_strided(
17626 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17627 TensorOptions options, DimnameList names
17628 ) override {
17629 const auto& out = outputs_[output_idx].get();
17630 resize_out(out, sizes, strides, options);
17631 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
17632 if (C10_UNLIKELY(maybe_proxy.has_value())) {
17633 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
17634 }
17635 if (!names.empty()) {
17636 namedinference::propagate_names(outputs_[output_idx], names);
17637 }
17638 // super must happen after, so that downstream can use maybe_get_output
17639 // to retrieve the output
17640 at::native::structured_erfinv_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17641 }
17642 void set_output_raw_strided(
17643 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17644 TensorOptions options, DimnameList names
17645 ) override {
17646 const auto& out = outputs_[output_idx].get();
17647 resize_out(out, sizes, strides, options);
17648 if (!names.empty()) {
17649 namedinference::propagate_names(outputs_[output_idx], names);
17650 }
17651 // super must happen after, so that downstream can use maybe_get_output
17652 // to retrieve the output
17653 at::native::structured_erfinv_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17654 }
17655 const Tensor& maybe_get_output(int64_t output_idx) override {
17656 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
17657 }
17658 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
17659 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
17660};
17661at::Tensor & wrapper_CPU_erfinv_out_out(const at::Tensor & self, at::Tensor & out) {
17662structured_erfinv_out_out op(out);
17663op.meta(self);
17664op.impl(self, op.maybe_get_output(0));
17665if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
17666return out;
17667}
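// The in-place wrapper reuses `self` as output slot 0: check_inplace()
// verifies that `self` already matches the sizes/options requested by the
// meta function (an in-place op cannot reallocate its own input), while the
// proxy machinery still allows a temporary to be used and copied back if
// needed.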
17668struct structured_erfinv_out_inplace final : public at::native::structured_erfinv_out {
17669 structured_erfinv_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
17670 void set_output_strided(
17671 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17672 TensorOptions options, DimnameList names
17673 ) override {
17674 const auto& out = outputs_[output_idx].get();
17675 check_inplace(out, sizes, options);
17676 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
17677 if (C10_UNLIKELY(maybe_proxy.has_value())) {
17678 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
17679 }
17680 if (!names.empty()) {
17681 namedinference::propagate_names(outputs_[output_idx], names);
17682 }
17683 // super must happen after, so that downstream can use maybe_get_output
17684 // to retrieve the output
17685 at::native::structured_erfinv_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17686 }
17687 void set_output_raw_strided(
17688 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17689 TensorOptions options, DimnameList names
17690 ) override {
17691 const auto& out = outputs_[output_idx].get();
17692 check_inplace(out, sizes, options);
17693 if (!names.empty()) {
17694 namedinference::propagate_names(outputs_[output_idx], names);
17695 }
17696 // super must happen after, so that downstream can use maybe_get_output
17697 // to retrieve the output
17698 at::native::structured_erfinv_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17699 }
17700 const Tensor& maybe_get_output(int64_t output_idx) override {
17701 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
17702 }
17703 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
17704 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
17705};
17706at::Tensor & wrapper_CPU_erfinv_(at::Tensor & self) {
17707structured_erfinv_out_inplace op(self);
17708op.meta(self);
17709op.impl(self, op.outputs_[0]);
17710if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
17711return self;
17712}
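// Illustrative call sites that reach the erfinv wrappers above for CPU
// tensors (a minimal sketch, assuming the usual ATen surface API):
//   at::Tensor y = at::erfinv(x);   // -> wrapper_CPU_erfinv
//   at::erfinv_out(out, x);         // -> wrapper_CPU_erfinv_out_out
//   x.erfinv_();                    // -> wrapper_CPU_erfinv_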
17713struct structured_i0_out_functional final : public at::native::structured_i0_out {
17714 void set_output_strided(
17715 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17716 TensorOptions options, DimnameList names
17717 ) override {
17718 outputs_[output_idx] = create_out(sizes, strides, options);
17719 if (!names.empty()) {
17720 namedinference::propagate_names(*outputs_[output_idx], names);
17721 }
17722 // super must happen after, so that downstream can use maybe_get_output
17723 // to retrieve the output
17724 at::native::structured_i0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17725 }
17726 void set_output_raw_strided(
17727 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17728 TensorOptions options, DimnameList names
17729 ) override {
17730 outputs_[output_idx] = create_out(sizes, strides, options);
17731 if (!names.empty()) {
17732 namedinference::propagate_names(*outputs_[output_idx], names);
17733 }
17734 // super must happen after, so that downstream can use maybe_get_output
17735 // to retrieve the output
17736 at::native::structured_i0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17737 }
17738 const Tensor& maybe_get_output(int64_t output_idx) override {
17739 return *outputs_[output_idx];
17740 }
17741 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
17742};
17743at::Tensor wrapper_CPU_i0(const at::Tensor & self) {
17744structured_i0_out_functional op;
17745op.meta(self);
17746op.impl(self, *op.outputs_[0]);
17747return std::move(op.outputs_[0]).take();
17748}
17749struct structured_i0_out_out final : public at::native::structured_i0_out {
17750 structured_i0_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
17751 void set_output_strided(
17752 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17753 TensorOptions options, DimnameList names
17754 ) override {
17755 const auto& out = outputs_[output_idx].get();
17756 resize_out(out, sizes, strides, options);
17757 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
17758 if (C10_UNLIKELY(maybe_proxy.has_value())) {
17759 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
17760 }
17761 if (!names.empty()) {
17762 namedinference::propagate_names(outputs_[output_idx], names);
17763 }
17764 // super must happen after, so that downstream can use maybe_get_output
17765 // to retrieve the output
17766 at::native::structured_i0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17767 }
17768 void set_output_raw_strided(
17769 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17770 TensorOptions options, DimnameList names
17771 ) override {
17772 const auto& out = outputs_[output_idx].get();
17773 resize_out(out, sizes, strides, options);
17774 if (!names.empty()) {
17775 namedinference::propagate_names(outputs_[output_idx], names);
17776 }
17777 // super must happen after, so that downstream can use maybe_get_output
17778 // to retrieve the output
17779 at::native::structured_i0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17780 }
17781 const Tensor& maybe_get_output(int64_t output_idx) override {
17782 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
17783 }
17784 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
17785 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
17786};
17787at::Tensor & wrapper_CPU_i0_out_out(const at::Tensor & self, at::Tensor & out) {
17788structured_i0_out_out op(out);
17789op.meta(self);
17790op.impl(self, op.maybe_get_output(0));
17791if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
17792return out;
17793}
17794struct structured_i0_out_inplace final : public at::native::structured_i0_out {
17795 structured_i0_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
17796 void set_output_strided(
17797 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17798 TensorOptions options, DimnameList names
17799 ) override {
17800 const auto& out = outputs_[output_idx].get();
17801 check_inplace(out, sizes, options);
17802 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
17803 if (C10_UNLIKELY(maybe_proxy.has_value())) {
17804 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
17805 }
17806 if (!names.empty()) {
17807 namedinference::propagate_names(outputs_[output_idx], names);
17808 }
17809 // super must happen after, so that downstream can use maybe_get_output
17810 // to retrieve the output
17811 at::native::structured_i0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17812 }
17813 void set_output_raw_strided(
17814 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17815 TensorOptions options, DimnameList names
17816 ) override {
17817 const auto& out = outputs_[output_idx].get();
17818 check_inplace(out, sizes, options);
17819 if (!names.empty()) {
17820 namedinference::propagate_names(outputs_[output_idx], names);
17821 }
17822 // super must happen after, so that downstream can use maybe_get_output
17823 // to retrieve the output
17824 at::native::structured_i0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17825 }
17826 const Tensor& maybe_get_output(int64_t output_idx) override {
17827 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
17828 }
17829 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
17830 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
17831};
17832at::Tensor & wrapper_CPU_i0_(at::Tensor & self) {
17833structured_i0_out_inplace op(self);
17834op.meta(self);
17835op.impl(self, op.outputs_[0]);
17836if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
17837return self;
17838}
17839struct structured_sign_out_functional final : public at::native::structured_sign_out {
17840 void set_output_strided(
17841 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17842 TensorOptions options, DimnameList names
17843 ) override {
17844 outputs_[output_idx] = create_out(sizes, strides, options);
17845 if (!names.empty()) {
17846 namedinference::propagate_names(*outputs_[output_idx], names);
17847 }
17848 // super must happen after, so that downstream can use maybe_get_output
17849 // to retrieve the output
17850 at::native::structured_sign_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17851 }
17852 void set_output_raw_strided(
17853 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17854 TensorOptions options, DimnameList names
17855 ) override {
17856 outputs_[output_idx] = create_out(sizes, strides, options);
17857 if (!names.empty()) {
17858 namedinference::propagate_names(*outputs_[output_idx], names);
17859 }
17860 // super must happen after, so that downstream can use maybe_get_output
17861 // to retrieve the output
17862 at::native::structured_sign_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17863 }
17864 const Tensor& maybe_get_output(int64_t output_idx) override {
17865 return *outputs_[output_idx];
17866 }
17867 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
17868};
17869at::Tensor wrapper_CPU_sign(const at::Tensor & self) {
17870structured_sign_out_functional op;
17871op.meta(self);
17872op.impl(self, *op.outputs_[0]);
17873return std::move(op.outputs_[0]).take();
17874}
17875struct structured_sign_out_out final : public at::native::structured_sign_out {
17876 structured_sign_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
17877 void set_output_strided(
17878 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17879 TensorOptions options, DimnameList names
17880 ) override {
17881 const auto& out = outputs_[output_idx].get();
17882 resize_out(out, sizes, strides, options);
17883 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
17884 if (C10_UNLIKELY(maybe_proxy.has_value())) {
17885 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
17886 }
17887 if (!names.empty()) {
17888 namedinference::propagate_names(outputs_[output_idx], names);
17889 }
17890 // super must happen after, so that downstream can use maybe_get_output
17891 // to retrieve the output
17892 at::native::structured_sign_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17893 }
17894 void set_output_raw_strided(
17895 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17896 TensorOptions options, DimnameList names
17897 ) override {
17898 const auto& out = outputs_[output_idx].get();
17899 resize_out(out, sizes, strides, options);
17900 if (!names.empty()) {
17901 namedinference::propagate_names(outputs_[output_idx], names);
17902 }
17903 // super must happen after, so that downstream can use maybe_get_output
17904 // to retrieve the output
17905 at::native::structured_sign_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17906 }
17907 const Tensor& maybe_get_output(int64_t output_idx) override {
17908 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
17909 }
17910 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
17911 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
17912};
17913at::Tensor & wrapper_CPU_sign_out_out(const at::Tensor & self, at::Tensor & out) {
17914structured_sign_out_out op(out);
17915op.meta(self);
17916op.impl(self, op.maybe_get_output(0));
17917if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
17918return out;
17919}
17920struct structured_sign_out_inplace final : public at::native::structured_sign_out {
17921 structured_sign_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
17922 void set_output_strided(
17923 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17924 TensorOptions options, DimnameList names
17925 ) override {
17926 const auto& out = outputs_[output_idx].get();
17927 check_inplace(out, sizes, options);
17928 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
17929 if (C10_UNLIKELY(maybe_proxy.has_value())) {
17930 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
17931 }
17932 if (!names.empty()) {
17933 namedinference::propagate_names(outputs_[output_idx], names);
17934 }
17935 // super must happen after, so that downstream can use maybe_get_output
17936 // to retrieve the output
17937 at::native::structured_sign_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17938 }
17939 void set_output_raw_strided(
17940 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17941 TensorOptions options, DimnameList names
17942 ) override {
17943 const auto& out = outputs_[output_idx].get();
17944 check_inplace(out, sizes, options);
17945 if (!names.empty()) {
17946 namedinference::propagate_names(outputs_[output_idx], names);
17947 }
17948 // super must happen after, so that downstream can use maybe_get_output
17949 // to retrieve the output
17950 at::native::structured_sign_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17951 }
17952 const Tensor& maybe_get_output(int64_t output_idx) override {
17953 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
17954 }
17955 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
17956 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
17957};
17958at::Tensor & wrapper_CPU_sign_(at::Tensor & self) {
17959structured_sign_out_inplace op(self);
17960op.meta(self);
17961op.impl(self, op.outputs_[0]);
17962if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
17963return self;
17964}
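// Only the functional and out= wrappers are generated for signbit below;
// there is no in-place variant, presumably because its result is boolean and
// could not be written back into a non-bool `self`.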
17965struct structured_signbit_out_functional final : public at::native::structured_signbit_out {
17966 void set_output_strided(
17967 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17968 TensorOptions options, DimnameList names
17969 ) override {
17970 outputs_[output_idx] = create_out(sizes, strides, options);
17971 if (!names.empty()) {
17972 namedinference::propagate_names(*outputs_[output_idx], names);
17973 }
17974 // super must happen after, so that downstream can use maybe_get_output
17975 // to retrieve the output
17976 at::native::structured_signbit_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17977 }
17978 void set_output_raw_strided(
17979 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
17980 TensorOptions options, DimnameList names
17981 ) override {
17982 outputs_[output_idx] = create_out(sizes, strides, options);
17983 if (!names.empty()) {
17984 namedinference::propagate_names(*outputs_[output_idx], names);
17985 }
17986 // super must happen after, so that downstream can use maybe_get_output
17987 // to retrieve the output
17988 at::native::structured_signbit_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
17989 }
17990 const Tensor& maybe_get_output(int64_t output_idx) override {
17991 return *outputs_[output_idx];
17992 }
17993 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
17994};
17995at::Tensor wrapper_CPU_signbit(const at::Tensor & self) {
17996structured_signbit_out_functional op;
17997op.meta(self);
17998op.impl(self, *op.outputs_[0]);
17999return std::move(op.outputs_[0]).take();
18000}
18001struct structured_signbit_out_out final : public at::native::structured_signbit_out {
18002 structured_signbit_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
18003 void set_output_strided(
18004 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18005 TensorOptions options, DimnameList names
18006 ) override {
18007 const auto& out = outputs_[output_idx].get();
18008 resize_out(out, sizes, strides, options);
18009 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
18010 if (C10_UNLIKELY(maybe_proxy.has_value())) {
18011 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
18012 }
18013 if (!names.empty()) {
18014 namedinference::propagate_names(outputs_[output_idx], names);
18015 }
18016 // super must happen after, so that downstream can use maybe_get_output
18017 // to retrieve the output
18018 at::native::structured_signbit_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18019 }
18020 void set_output_raw_strided(
18021 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18022 TensorOptions options, DimnameList names
18023 ) override {
18024 const auto& out = outputs_[output_idx].get();
18025 resize_out(out, sizes, strides, options);
18026 if (!names.empty()) {
18027 namedinference::propagate_names(outputs_[output_idx], names);
18028 }
18029 // super must happen after, so that downstream can use maybe_get_output
18030 // to retrieve the output
18031 at::native::structured_signbit_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18032 }
18033 const Tensor& maybe_get_output(int64_t output_idx) override {
18034 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
18035 }
18036 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
18037 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
18038};
18039at::Tensor & wrapper_CPU_signbit_out_out(const at::Tensor & self, at::Tensor & out) {
18040structured_signbit_out_out op(out);
18041op.meta(self);
18042op.impl(self, op.maybe_get_output(0));
18043if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
18044return out;
18045}
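// atan2 is a binary structured op: meta() receives both operands so that the
// output shape and dtype (including broadcasting and type promotion) are
// decided before impl() runs.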
18046struct structured_atan2_out_functional final : public at::native::structured_atan2_out {
18047 void set_output_strided(
18048 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18049 TensorOptions options, DimnameList names
18050 ) override {
18051 outputs_[output_idx] = create_out(sizes, strides, options);
18052 if (!names.empty()) {
18053 namedinference::propagate_names(*outputs_[output_idx], names);
18054 }
18055 // super must happen after, so that downstream can use maybe_get_output
18056 // to retrieve the output
18057 at::native::structured_atan2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18058 }
18059 void set_output_raw_strided(
18060 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18061 TensorOptions options, DimnameList names
18062 ) override {
18063 outputs_[output_idx] = create_out(sizes, strides, options);
18064 if (!names.empty()) {
18065 namedinference::propagate_names(*outputs_[output_idx], names);
18066 }
18067 // super must happen after, so that downstream can use maybe_get_output
18068 // to retrieve the output
18069 at::native::structured_atan2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18070 }
18071 const Tensor& maybe_get_output(int64_t output_idx) override {
18072 return *outputs_[output_idx];
18073 }
18074 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
18075};
18076at::Tensor wrapper_CPU_atan2(const at::Tensor & self, const at::Tensor & other) {
18077structured_atan2_out_functional op;
18078op.meta(self, other);
18079op.impl(self, other, *op.outputs_[0]);
18080return std::move(op.outputs_[0]).take();
18081}
18082struct structured_atan2_out_out final : public at::native::structured_atan2_out {
18083 structured_atan2_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
18084 void set_output_strided(
18085 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18086 TensorOptions options, DimnameList names
18087 ) override {
18088 const auto& out = outputs_[output_idx].get();
18089 resize_out(out, sizes, strides, options);
18090 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
18091 if (C10_UNLIKELY(maybe_proxy.has_value())) {
18092 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
18093 }
18094 if (!names.empty()) {
18095 namedinference::propagate_names(outputs_[output_idx], names);
18096 }
18097 // super must happen after, so that downstream can use maybe_get_output
18098 // to retrieve the output
18099 at::native::structured_atan2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18100 }
18101 void set_output_raw_strided(
18102 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18103 TensorOptions options, DimnameList names
18104 ) override {
18105 const auto& out = outputs_[output_idx].get();
18106 resize_out(out, sizes, strides, options);
18107 if (!names.empty()) {
18108 namedinference::propagate_names(outputs_[output_idx], names);
18109 }
18110 // super must happen after, so that downstream can use maybe_get_output
18111 // to retrieve the output
18112 at::native::structured_atan2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18113 }
18114 const Tensor& maybe_get_output(int64_t output_idx) override {
18115 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
18116 }
18117 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
18118 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
18119};
18120at::Tensor & wrapper_CPU_atan2_out_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
18121structured_atan2_out_out op(out);
18122op.meta(self, other);
18123op.impl(self, other, op.maybe_get_output(0));
18124if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
18125return out;
18126}
18127struct structured_atan2_out_inplace final : public at::native::structured_atan2_out {
18128 structured_atan2_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
18129 void set_output_strided(
18130 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18131 TensorOptions options, DimnameList names
18132 ) override {
18133 const auto& out = outputs_[output_idx].get();
18134 check_inplace(out, sizes, options);
18135 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
18136 if (C10_UNLIKELY(maybe_proxy.has_value())) {
18137 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
18138 }
18139 if (!names.empty()) {
18140 namedinference::propagate_names(outputs_[output_idx], names);
18141 }
18142 // super must happen after, so that downstream can use maybe_get_output
18143 // to retrieve the output
18144 at::native::structured_atan2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18145 }
18146 void set_output_raw_strided(
18147 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18148 TensorOptions options, DimnameList names
18149 ) override {
18150 const auto& out = outputs_[output_idx].get();
18151 check_inplace(out, sizes, options);
18152 if (!names.empty()) {
18153 namedinference::propagate_names(outputs_[output_idx], names);
18154 }
18155 // super must happen after, so that downstream can use maybe_get_output
18156 // to retrieve the output
18157 at::native::structured_atan2_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18158 }
18159 const Tensor& maybe_get_output(int64_t output_idx) override {
18160 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
18161 }
18162 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
18163 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
18164};
18165at::Tensor & wrapper_CPU_atan2_(at::Tensor & self, const at::Tensor & other) {
18166structured_atan2_out_inplace op(self);
18167op.meta(self, other);
18168op.impl(self, other, op.outputs_[0]);
18169if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
18170return self;
18171}
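// The histogram family below is not implemented as structured kernels: each
// wrapper lives in an anonymous namespace and forwards directly to the
// corresponding at::native CPU implementation, with no device check or
// DeviceGuard emitted.
// A minimal sketch of user-facing calls these serve (assuming the usual ATen
// surface API):
//   auto h = at::histc(x, /*bins=*/100, /*min=*/0, /*max=*/1);
//   auto [hist, edges] = at::histogram(x, /*bins=*/10);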
18172namespace {
18173at::Tensor wrapper_CPU__histc(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max) {
18174 // No device check
18175 // DeviceGuard omitted
18176 return at::native::histogram_histc_cpu(self, bins, min, max);
18177}
18178} // anonymous namespace
18179namespace {
18180at::Tensor & wrapper_CPU_out_histc_out(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out) {
18181 // No device check
18182 // DeviceGuard omitted
18183 return at::native::histogram_histc_cpu_out(self, bins, min, max, out);
18184}
18185} // anonymous namespace
18186namespace {
18187::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_bins_tensor_histogram(const at::Tensor & self, const at::Tensor & bins, const c10::optional<at::Tensor> & weight, bool density) {
18188 // No device check
18189 // DeviceGuard omitted
18190 return at::native::histogram_cpu(self, bins, weight, density);
18191}
18192} // anonymous namespace
18193namespace {
18194::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_bins_tensor_out_histogram_out(const at::Tensor & self, const at::Tensor & bins, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges) {
18195 // No device check
18196 // DeviceGuard omitted
18197 return at::native::histogram_out_cpu(self, bins, weight, density, hist, bin_edges);
18198}
18199} // anonymous namespace
18200namespace {
18201::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_bin_ct_histogram(const at::Tensor & self, int64_t bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
18202 // No device check
18203 // DeviceGuard omitted
18204 return at::native::histogram_cpu(self, bins, range, weight, density);
18205}
18206} // anonymous namespace
18207namespace {
18208::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_bin_ct_out_histogram_out(const at::Tensor & self, int64_t bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges) {
18209 // No device check
18210 // DeviceGuard omitted
18211 return at::native::histogram_out_cpu(self, bins, range, weight, density, hist, bin_edges);
18212}
18213} // anonymous namespace
18214namespace {
18215::std::vector<at::Tensor> wrapper_CPU___histogramdd_bin_edges(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
18216 // No device check
18217 // DeviceGuard omitted
18218 return at::native::histogramdd_bin_edges_cpu(self, bins, range, weight, density);
18219}
18220} // anonymous namespace
18221namespace {
18222at::Tensor wrapper_CPU___histogramdd_from_bin_cts(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
18223 // No device check
18224 // DeviceGuard omitted
18225 return at::native::histogramdd_cpu(self, bins, range, weight, density);
18226}
18227} // anonymous namespace
18228namespace {
18229at::Tensor wrapper_CPU___histogramdd_from_bin_tensors(const at::Tensor & self, at::TensorList bins, const c10::optional<at::Tensor> & weight, bool density) {
18230 // No device check
18231 // DeviceGuard omitted
18232 return at::native::histogramdd_cpu(self, bins, weight, density);
18233}
18234} // anonymous namespace
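// Back to structured kernels: fmod.Tensor gets the full functional / out= /
// in-place trio below, all derived from at::native::structured_fmod_out.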
18235struct structured_fmod_out_functional final : public at::native::structured_fmod_out {
18236 void set_output_strided(
18237 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18238 TensorOptions options, DimnameList names
18239 ) override {
18240 outputs_[output_idx] = create_out(sizes, strides, options);
18241 if (!names.empty()) {
18242 namedinference::propagate_names(*outputs_[output_idx], names);
18243 }
18244 // super must happen after, so that downstream can use maybe_get_output
18245 // to retrieve the output
18246 at::native::structured_fmod_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18247 }
18248 void set_output_raw_strided(
18249 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18250 TensorOptions options, DimnameList names
18251 ) override {
18252 outputs_[output_idx] = create_out(sizes, strides, options);
18253 if (!names.empty()) {
18254 namedinference::propagate_names(*outputs_[output_idx], names);
18255 }
18256 // super must happen after, so that downstream can use maybe_get_output
18257 // to retrieve the output
18258 at::native::structured_fmod_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18259 }
18260 const Tensor& maybe_get_output(int64_t output_idx) override {
18261 return *outputs_[output_idx];
18262 }
18263 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
18264};
18265at::Tensor wrapper_CPU_fmod_Tensor(const at::Tensor & self, const at::Tensor & other) {
18266structured_fmod_out_functional op;
18267op.meta(self, other);
18268op.impl(self, other, *op.outputs_[0]);
18269return std::move(op.outputs_[0]).take();
18270}
18271struct structured_fmod_out_out final : public at::native::structured_fmod_out {
18272 structured_fmod_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
18273 void set_output_strided(
18274 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18275 TensorOptions options, DimnameList names
18276 ) override {
18277 const auto& out = outputs_[output_idx].get();
18278 resize_out(out, sizes, strides, options);
18279 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
18280 if (C10_UNLIKELY(maybe_proxy.has_value())) {
18281 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
18282 }
18283 if (!names.empty()) {
18284 namedinference::propagate_names(outputs_[output_idx], names);
18285 }
18286 // super must happen after, so that downstream can use maybe_get_output
18287 // to retrieve the output
18288 at::native::structured_fmod_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18289 }
18290 void set_output_raw_strided(
18291 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18292 TensorOptions options, DimnameList names
18293 ) override {
18294 const auto& out = outputs_[output_idx].get();
18295 resize_out(out, sizes, strides, options);
18296 if (!names.empty()) {
18297 namedinference::propagate_names(outputs_[output_idx], names);
18298 }
18299 // super must happen after, so that downstream can use maybe_get_output
18300 // to retrieve the output
18301 at::native::structured_fmod_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18302 }
18303 const Tensor& maybe_get_output(int64_t output_idx) override {
18304 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
18305 }
18306 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
18307 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
18308};
18309at::Tensor & wrapper_CPU_fmod_out_Tensor_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
18310structured_fmod_out_out op(out);
18311op.meta(self, other);
18312op.impl(self, other, op.maybe_get_output(0));
18313if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
18314return out;
18315}
18316struct structured_fmod_out_inplace final : public at::native::structured_fmod_out {
18317 structured_fmod_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
18318 void set_output_strided(
18319 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18320 TensorOptions options, DimnameList names
18321 ) override {
18322 const auto& out = outputs_[output_idx].get();
18323 check_inplace(out, sizes, options);
18324 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
18325 if (C10_UNLIKELY(maybe_proxy.has_value())) {
18326 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
18327 }
18328 if (!names.empty()) {
18329 namedinference::propagate_names(outputs_[output_idx], names);
18330 }
18331 // super must happen after, so that downstream can use maybe_get_output
18332 // to retrieve the output
18333 at::native::structured_fmod_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18334 }
18335 void set_output_raw_strided(
18336 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18337 TensorOptions options, DimnameList names
18338 ) override {
18339 const auto& out = outputs_[output_idx].get();
18340 check_inplace(out, sizes, options);
18341 if (!names.empty()) {
18342 namedinference::propagate_names(outputs_[output_idx], names);
18343 }
18344 // super must happen after, so that downstream can use maybe_get_output
18345 // to retrieve the output
18346 at::native::structured_fmod_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18347 }
18348 const Tensor& maybe_get_output(int64_t output_idx) override {
18349 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
18350 }
18351 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
18352 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
18353};
18354at::Tensor & wrapper_CPU_fmod__Tensor(at::Tensor & self, const at::Tensor & other) {
18355structured_fmod_out_inplace op(self);
18356op.meta(self, other);
18357op.impl(self, other, op.outputs_[0]);
18358if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
18359return self;
18360}
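// hypot follows the same trio. Illustrative calls for CPU tensors (a sketch,
// assuming the usual ATen surface API):
//   at::Tensor r = at::hypot(a, b);   // functional
//   at::hypot_out(out, a, b);         // out= variant
//   a.hypot_(b);                      // in-place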
18361struct structured_hypot_out_functional final : public at::native::structured_hypot_out {
18362 void set_output_strided(
18363 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18364 TensorOptions options, DimnameList names
18365 ) override {
18366 outputs_[output_idx] = create_out(sizes, strides, options);
18367 if (!names.empty()) {
18368 namedinference::propagate_names(*outputs_[output_idx], names);
18369 }
18370 // super must happen after, so that downstream can use maybe_get_output
18371 // to retrieve the output
18372 at::native::structured_hypot_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18373 }
18374 void set_output_raw_strided(
18375 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18376 TensorOptions options, DimnameList names
18377 ) override {
18378 outputs_[output_idx] = create_out(sizes, strides, options);
18379 if (!names.empty()) {
18380 namedinference::propagate_names(*outputs_[output_idx], names);
18381 }
18382 // super must happen after, so that downstream can use maybe_get_output
18383 // to retrieve the output
18384 at::native::structured_hypot_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18385 }
18386 const Tensor& maybe_get_output(int64_t output_idx) override {
18387 return *outputs_[output_idx];
18388 }
18389 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
18390};
18391at::Tensor wrapper_CPU_hypot(const at::Tensor & self, const at::Tensor & other) {
18392structured_hypot_out_functional op;
18393op.meta(self, other);
18394op.impl(self, other, *op.outputs_[0]);
18395return std::move(op.outputs_[0]).take();
18396}
18397struct structured_hypot_out_out final : public at::native::structured_hypot_out {
18398 structured_hypot_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
18399 void set_output_strided(
18400 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18401 TensorOptions options, DimnameList names
18402 ) override {
18403 const auto& out = outputs_[output_idx].get();
18404 resize_out(out, sizes, strides, options);
18405 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
18406 if (C10_UNLIKELY(maybe_proxy.has_value())) {
18407 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
18408 }
18409 if (!names.empty()) {
18410 namedinference::propagate_names(outputs_[output_idx], names);
18411 }
18412 // super must happen after, so that downstream can use maybe_get_output
18413 // to retrieve the output
18414 at::native::structured_hypot_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18415 }
18416 void set_output_raw_strided(
18417 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18418 TensorOptions options, DimnameList names
18419 ) override {
18420 const auto& out = outputs_[output_idx].get();
18421 resize_out(out, sizes, strides, options);
18422 if (!names.empty()) {
18423 namedinference::propagate_names(outputs_[output_idx], names);
18424 }
18425 // super must happen after, so that downstream can use maybe_get_output
18426 // to retrieve the output
18427 at::native::structured_hypot_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18428 }
18429 const Tensor& maybe_get_output(int64_t output_idx) override {
18430 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
18431 }
18432 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
18433 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
18434};
18435at::Tensor & wrapper_CPU_hypot_out_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
18436structured_hypot_out_out op(out);
18437op.meta(self, other);
18438op.impl(self, other, op.maybe_get_output(0));
18439if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
18440return out;
18441}
18442struct structured_hypot_out_inplace final : public at::native::structured_hypot_out {
18443 structured_hypot_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
18444 void set_output_strided(
18445 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18446 TensorOptions options, DimnameList names
18447 ) override {
18448 const auto& out = outputs_[output_idx].get();
18449 check_inplace(out, sizes, options);
18450 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
18451 if (C10_UNLIKELY(maybe_proxy.has_value())) {
18452 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
18453 }
18454 if (!names.empty()) {
18455 namedinference::propagate_names(outputs_[output_idx], names);
18456 }
18457 // super must happen after, so that downstream can use maybe_get_output
18458 // to retrieve the output
18459 at::native::structured_hypot_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18460 }
18461 void set_output_raw_strided(
18462 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18463 TensorOptions options, DimnameList names
18464 ) override {
18465 const auto& out = outputs_[output_idx].get();
18466 check_inplace(out, sizes, options);
18467 if (!names.empty()) {
18468 namedinference::propagate_names(outputs_[output_idx], names);
18469 }
18470 // super must happen after, so that downstream can use maybe_get_output
18471 // to retrieve the output
18472 at::native::structured_hypot_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18473 }
18474 const Tensor& maybe_get_output(int64_t output_idx) override {
18475 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
18476 }
18477 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
18478 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
18479};
18480at::Tensor & wrapper_CPU_hypot_(at::Tensor & self, const at::Tensor & other) {
18481structured_hypot_out_inplace op(self);
18482op.meta(self, other);
18483op.impl(self, other, op.outputs_[0]);
18484if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
18485return self;
18486}
18487struct structured_igamma_out_functional final : public at::native::structured_igamma_out {
18488 void set_output_strided(
18489 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18490 TensorOptions options, DimnameList names
18491 ) override {
18492 outputs_[output_idx] = create_out(sizes, strides, options);
18493 if (!names.empty()) {
18494 namedinference::propagate_names(*outputs_[output_idx], names);
18495 }
18496 // super must happen after, so that downstream can use maybe_get_output
18497 // to retrieve the output
18498 at::native::structured_igamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18499 }
18500 void set_output_raw_strided(
18501 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18502 TensorOptions options, DimnameList names
18503 ) override {
18504 outputs_[output_idx] = create_out(sizes, strides, options);
18505 if (!names.empty()) {
18506 namedinference::propagate_names(*outputs_[output_idx], names);
18507 }
18508 // super must happen after, so that downstream can use maybe_get_output
18509 // to retrieve the output
18510 at::native::structured_igamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18511 }
18512 const Tensor& maybe_get_output(int64_t output_idx) override {
18513 return *outputs_[output_idx];
18514 }
18515 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
18516};
18517at::Tensor wrapper_CPU_igamma(const at::Tensor & self, const at::Tensor & other) {
18518structured_igamma_out_functional op;
18519op.meta(self, other);
18520op.impl(self, other, *op.outputs_[0]);
18521return std::move(op.outputs_[0]).take();
18522}
18523struct structured_igamma_out_out final : public at::native::structured_igamma_out {
18524 structured_igamma_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
18525 void set_output_strided(
18526 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18527 TensorOptions options, DimnameList names
18528 ) override {
18529 const auto& out = outputs_[output_idx].get();
18530 resize_out(out, sizes, strides, options);
18531 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
18532 if (C10_UNLIKELY(maybe_proxy.has_value())) {
18533 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
18534 }
18535 if (!names.empty()) {
18536 namedinference::propagate_names(outputs_[output_idx], names);
18537 }
18538 // super must happen after, so that downstream can use maybe_get_output
18539 // to retrieve the output
18540 at::native::structured_igamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18541 }
18542 void set_output_raw_strided(
18543 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18544 TensorOptions options, DimnameList names
18545 ) override {
18546 const auto& out = outputs_[output_idx].get();
18547 resize_out(out, sizes, strides, options);
18548 if (!names.empty()) {
18549 namedinference::propagate_names(outputs_[output_idx], names);
18550 }
18551 // super must happen after, so that downstream can use maybe_get_output
18552 // to retrieve the output
18553 at::native::structured_igamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18554 }
18555 const Tensor& maybe_get_output(int64_t output_idx) override {
18556 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
18557 }
18558 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
18559 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
18560};
18561at::Tensor & wrapper_CPU_igamma_out_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
18562structured_igamma_out_out op(out);
18563op.meta(self, other);
18564op.impl(self, other, op.maybe_get_output(0));
18565if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
18566return out;
18567}
18568struct structured_igamma_out_inplace final : public at::native::structured_igamma_out {
18569 structured_igamma_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
18570 void set_output_strided(
18571 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18572 TensorOptions options, DimnameList names
18573 ) override {
18574 const auto& out = outputs_[output_idx].get();
18575 check_inplace(out, sizes, options);
18576 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
18577 if (C10_UNLIKELY(maybe_proxy.has_value())) {
18578 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
18579 }
18580 if (!names.empty()) {
18581 namedinference::propagate_names(outputs_[output_idx], names);
18582 }
18583 // super must happen after, so that downstream can use maybe_get_output
18584 // to retrieve the output
18585 at::native::structured_igamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18586 }
18587 void set_output_raw_strided(
18588 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18589 TensorOptions options, DimnameList names
18590 ) override {
18591 const auto& out = outputs_[output_idx].get();
18592 check_inplace(out, sizes, options);
18593 if (!names.empty()) {
18594 namedinference::propagate_names(outputs_[output_idx], names);
18595 }
18596 // super must happen after, so that downstream can use maybe_get_output
18597 // to retrieve the output
18598 at::native::structured_igamma_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18599 }
18600 const Tensor& maybe_get_output(int64_t output_idx) override {
18601 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
18602 }
18603 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
18604 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
18605};
18606at::Tensor & wrapper_CPU_igamma_(at::Tensor & self, const at::Tensor & other) {
18607structured_igamma_out_inplace op(self);
18608op.meta(self, other);
18609op.impl(self, other, op.outputs_[0]);
18610if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
18611return self;
18612}
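// igammac (the complement of igamma, i.e. the regularized upper incomplete
// gamma function) is wrapped with the same three variants below.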
18613struct structured_igammac_out_functional final : public at::native::structured_igammac_out {
18614 void set_output_strided(
18615 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18616 TensorOptions options, DimnameList names
18617 ) override {
18618 outputs_[output_idx] = create_out(sizes, strides, options);
18619 if (!names.empty()) {
18620 namedinference::propagate_names(*outputs_[output_idx], names);
18621 }
18622 // super must happen after, so that downstream can use maybe_get_output
18623 // to retrieve the output
18624 at::native::structured_igammac_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18625 }
18626 void set_output_raw_strided(
18627 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18628 TensorOptions options, DimnameList names
18629 ) override {
18630 outputs_[output_idx] = create_out(sizes, strides, options);
18631 if (!names.empty()) {
18632 namedinference::propagate_names(*outputs_[output_idx], names);
18633 }
18634 // super must happen after, so that downstream can use maybe_get_output
18635 // to retrieve the output
18636 at::native::structured_igammac_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18637 }
18638 const Tensor& maybe_get_output(int64_t output_idx) override {
18639 return *outputs_[output_idx];
18640 }
18641 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
18642};
18643at::Tensor wrapper_CPU_igammac(const at::Tensor & self, const at::Tensor & other) {
18644structured_igammac_out_functional op;
18645op.meta(self, other);
18646op.impl(self, other, *op.outputs_[0]);
18647return std::move(op.outputs_[0]).take();
18648}
18649struct structured_igammac_out_out final : public at::native::structured_igammac_out {
18650 structured_igammac_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
18651 void set_output_strided(
18652 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18653 TensorOptions options, DimnameList names
18654 ) override {
18655 const auto& out = outputs_[output_idx].get();
18656 resize_out(out, sizes, strides, options);
18657 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
18658 if (C10_UNLIKELY(maybe_proxy.has_value())) {
18659 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
18660 }
18661 if (!names.empty()) {
18662 namedinference::propagate_names(outputs_[output_idx], names);
18663 }
18664 // super must happen after, so that downstream can use maybe_get_output
18665 // to retrieve the output
18666 at::native::structured_igammac_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18667 }
18668 void set_output_raw_strided(
18669 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18670 TensorOptions options, DimnameList names
18671 ) override {
18672 const auto& out = outputs_[output_idx].get();
18673 resize_out(out, sizes, strides, options);
18674 if (!names.empty()) {
18675 namedinference::propagate_names(outputs_[output_idx], names);
18676 }
18677 // super must happen after, so that downstream can use maybe_get_output
18678 // to retrieve the output
18679 at::native::structured_igammac_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18680 }
18681 const Tensor& maybe_get_output(int64_t output_idx) override {
18682 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
18683 }
18684 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
18685 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
18686};
18687at::Tensor & wrapper_CPU_igammac_out_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
18688structured_igammac_out_out op(out);
18689op.meta(self, other);
18690op.impl(self, other, op.maybe_get_output(0));
18691if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
18692return out;
18693}
18694struct structured_igammac_out_inplace final : public at::native::structured_igammac_out {
18695 structured_igammac_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
18696 void set_output_strided(
18697 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18698 TensorOptions options, DimnameList names
18699 ) override {
18700 const auto& out = outputs_[output_idx].get();
18701 check_inplace(out, sizes, options);
18702 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
18703 if (C10_UNLIKELY(maybe_proxy.has_value())) {
18704 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
18705 }
18706 if (!names.empty()) {
18707 namedinference::propagate_names(outputs_[output_idx], names);
18708 }
18709 // super must happen after, so that downstream can use maybe_get_output
18710 // to retrieve the output
18711 at::native::structured_igammac_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18712 }
18713 void set_output_raw_strided(
18714 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18715 TensorOptions options, DimnameList names
18716 ) override {
18717 const auto& out = outputs_[output_idx].get();
18718 check_inplace(out, sizes, options);
18719 if (!names.empty()) {
18720 namedinference::propagate_names(outputs_[output_idx], names);
18721 }
18722 // super must happen after, so that downstream can use maybe_get_output
18723 // to retrieve the output
18724 at::native::structured_igammac_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18725 }
18726 const Tensor& maybe_get_output(int64_t output_idx) override {
18727 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
18728 }
18729 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
18730 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
18731};
18732at::Tensor & wrapper_CPU_igammac_(at::Tensor & self, const at::Tensor & other) {
18733structured_igammac_out_inplace op(self);
18734op.meta(self, other);
18735op.impl(self, other, op.outputs_[0]);
18736if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
18737return self;
18738}
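// nextafter (the next representable floating-point value after `self` in the
// direction of `other`) is wrapped with the same generated pattern below.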
18739struct structured_nextafter_out_functional final : public at::native::structured_nextafter_out {
18740 void set_output_strided(
18741 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18742 TensorOptions options, DimnameList names
18743 ) override {
18744 outputs_[output_idx] = create_out(sizes, strides, options);
18745 if (!names.empty()) {
18746 namedinference::propagate_names(*outputs_[output_idx], names);
18747 }
18748 // super must happen after, so that downstream can use maybe_get_output
18749 // to retrieve the output
18750 at::native::structured_nextafter_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18751 }
18752 void set_output_raw_strided(
18753 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18754 TensorOptions options, DimnameList names
18755 ) override {
18756 outputs_[output_idx] = create_out(sizes, strides, options);
18757 if (!names.empty()) {
18758 namedinference::propagate_names(*outputs_[output_idx], names);
18759 }
18760 // super must happen after, so that downstream can use maybe_get_output
18761 // to retrieve the output
18762 at::native::structured_nextafter_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18763 }
18764 const Tensor& maybe_get_output(int64_t output_idx) override {
18765 return *outputs_[output_idx];
18766 }
18767 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
18768};
18769at::Tensor wrapper_CPU_nextafter(const at::Tensor & self, const at::Tensor & other) {
18770structured_nextafter_out_functional op;
18771op.meta(self, other);
18772op.impl(self, other, *op.outputs_[0]);
18773return std::move(op.outputs_[0]).take();
18774}
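// NOTE: the functional wrapper owns its result as c10::ExclusivelyOwned<Tensor>
// (allocated by create_out in the set_output_* hooks); std::move(...).take()
// then releases the Tensor to the caller, which is intended to avoid an extra
// reference-count round trip on this hot path.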
18775struct structured_nextafter_out_out final : public at::native::structured_nextafter_out {
18776 structured_nextafter_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
18777 void set_output_strided(
18778 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18779 TensorOptions options, DimnameList names
18780 ) override {
18781 const auto& out = outputs_[output_idx].get();
18782 resize_out(out, sizes, strides, options);
18783 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
18784 if (C10_UNLIKELY(maybe_proxy.has_value())) {
18785 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
18786 }
18787 if (!names.empty()) {
18788 namedinference::propagate_names(outputs_[output_idx], names);
18789 }
18790 // super must happen after, so that downstream can use maybe_get_output
18791 // to retrieve the output
18792 at::native::structured_nextafter_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18793 }
18794 void set_output_raw_strided(
18795 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18796 TensorOptions options, DimnameList names
18797 ) override {
18798 const auto& out = outputs_[output_idx].get();
18799 resize_out(out, sizes, strides, options);
18800 if (!names.empty()) {
18801 namedinference::propagate_names(outputs_[output_idx], names);
18802 }
18803 // super must happen after, so that downstream can use maybe_get_output
18804 // to retrieve the output
18805 at::native::structured_nextafter_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18806 }
18807 const Tensor& maybe_get_output(int64_t output_idx) override {
18808 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
18809 }
18810 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
18811 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
18812};
18813at::Tensor & wrapper_CPU_nextafter_out_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
18814structured_nextafter_out_out op(out);
18815op.meta(self, other);
18816op.impl(self, other, op.maybe_get_output(0));
18817if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
18818return out;
18819}
18820struct structured_nextafter_out_inplace final : public at::native::structured_nextafter_out {
18821 structured_nextafter_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
18822 void set_output_strided(
18823 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18824 TensorOptions options, DimnameList names
18825 ) override {
18826 const auto& out = outputs_[output_idx].get();
18827 check_inplace(out, sizes, options);
18828 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
18829 if (C10_UNLIKELY(maybe_proxy.has_value())) {
18830 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
18831 }
18832 if (!names.empty()) {
18833 namedinference::propagate_names(outputs_[output_idx], names);
18834 }
18835 // super must happen after, so that downstream can use maybe_get_output
18836 // to retrieve the output
18837 at::native::structured_nextafter_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18838 }
18839 void set_output_raw_strided(
18840 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18841 TensorOptions options, DimnameList names
18842 ) override {
18843 const auto& out = outputs_[output_idx].get();
18844 check_inplace(out, sizes, options);
18845 if (!names.empty()) {
18846 namedinference::propagate_names(outputs_[output_idx], names);
18847 }
18848 // super must happen after, so that downstream can use maybe_get_output
18849 // to retrieve the output
18850 at::native::structured_nextafter_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18851 }
18852 const Tensor& maybe_get_output(int64_t output_idx) override {
18853 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
18854 }
18855 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
18856 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
18857};
18858at::Tensor & wrapper_CPU_nextafter_(at::Tensor & self, const at::Tensor & other) {
18859structured_nextafter_out_inplace op(self);
18860op.meta(self, other);
18861op.impl(self, other, op.outputs_[0]);
18862if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
18863return self;
18864}
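// NOTE: the inplace wrapper differs from the out= one only in that
// check_inplace validates that `self` already has the expected sizes and
// options rather than resizing it; any proxy output is still copied back
// into `self` before it is returned.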
18865struct structured_remainder_out_functional final : public at::native::structured_remainder_out {
18866 void set_output_strided(
18867 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18868 TensorOptions options, DimnameList names
18869 ) override {
18870 outputs_[output_idx] = create_out(sizes, strides, options);
18871 if (!names.empty()) {
18872 namedinference::propagate_names(*outputs_[output_idx], names);
18873 }
18874 // super must happen after, so that downstream can use maybe_get_output
18875 // to retrieve the output
18876 at::native::structured_remainder_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18877 }
18878 void set_output_raw_strided(
18879 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18880 TensorOptions options, DimnameList names
18881 ) override {
18882 outputs_[output_idx] = create_out(sizes, strides, options);
18883 if (!names.empty()) {
18884 namedinference::propagate_names(*outputs_[output_idx], names);
18885 }
18886 // super must happen after, so that downstream can use maybe_get_output
18887 // to retrieve the output
18888 at::native::structured_remainder_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18889 }
18890 const Tensor& maybe_get_output(int64_t output_idx) override {
18891 return *outputs_[output_idx];
18892 }
18893 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
18894};
18895at::Tensor wrapper_CPU_remainder_Tensor(const at::Tensor & self, const at::Tensor & other) {
18896structured_remainder_out_functional op;
18897op.meta(self, other);
18898op.impl(self, other, *op.outputs_[0]);
18899return std::move(op.outputs_[0]).take();
18900}
18901struct structured_remainder_out_out final : public at::native::structured_remainder_out {
18902 structured_remainder_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
18903 void set_output_strided(
18904 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18905 TensorOptions options, DimnameList names
18906 ) override {
18907 const auto& out = outputs_[output_idx].get();
18908 resize_out(out, sizes, strides, options);
18909 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
18910 if (C10_UNLIKELY(maybe_proxy.has_value())) {
18911 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
18912 }
18913 if (!names.empty()) {
18914 namedinference::propagate_names(outputs_[output_idx], names);
18915 }
18916 // super must happen after, so that downstream can use maybe_get_output
18917 // to retrieve the output
18918 at::native::structured_remainder_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18919 }
18920 void set_output_raw_strided(
18921 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18922 TensorOptions options, DimnameList names
18923 ) override {
18924 const auto& out = outputs_[output_idx].get();
18925 resize_out(out, sizes, strides, options);
18926 if (!names.empty()) {
18927 namedinference::propagate_names(outputs_[output_idx], names);
18928 }
18929 // super must happen after, so that downstream can use maybe_get_output
18930 // to retrieve the output
18931 at::native::structured_remainder_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18932 }
18933 const Tensor& maybe_get_output(int64_t output_idx) override {
18934 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
18935 }
18936 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
18937 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
18938};
18939at::Tensor & wrapper_CPU_remainder_out_Tensor_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
18940structured_remainder_out_out op(out);
18941op.meta(self, other);
18942op.impl(self, other, op.maybe_get_output(0));
18943if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
18944return out;
18945}
18946struct structured_remainder_out_inplace final : public at::native::structured_remainder_out {
18947 structured_remainder_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
18948 void set_output_strided(
18949 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18950 TensorOptions options, DimnameList names
18951 ) override {
18952 const auto& out = outputs_[output_idx].get();
18953 check_inplace(out, sizes, options);
18954 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
18955 if (C10_UNLIKELY(maybe_proxy.has_value())) {
18956 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
18957 }
18958 if (!names.empty()) {
18959 namedinference::propagate_names(outputs_[output_idx], names);
18960 }
18961 // super must happen after, so that downstream can use maybe_get_output
18962 // to retrieve the output
18963 at::native::structured_remainder_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18964 }
18965 void set_output_raw_strided(
18966 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
18967 TensorOptions options, DimnameList names
18968 ) override {
18969 const auto& out = outputs_[output_idx].get();
18970 check_inplace(out, sizes, options);
18971 if (!names.empty()) {
18972 namedinference::propagate_names(outputs_[output_idx], names);
18973 }
18974 // super must happen after, so that downstream can use maybe_get_output
18975 // to retrieve the output
18976 at::native::structured_remainder_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
18977 }
18978 const Tensor& maybe_get_output(int64_t output_idx) override {
18979 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
18980 }
18981 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
18982 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
18983};
18984at::Tensor & wrapper_CPU_remainder__Tensor(at::Tensor & self, const at::Tensor & other) {
18985structured_remainder_out_inplace op(self);
18986op.meta(self, other);
18987op.impl(self, other, op.outputs_[0]);
18988if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
18989return self;
18990}
18991namespace {
18992at::Tensor wrapper_CPU_Scalar_Tensor_remainder(const at::Scalar & self, const at::Tensor & other) {
18993 // No device check
18994 // DeviceGuard omitted
18995 return at::native::remainder(self, other);
18996}
18997} // anonymous namespace
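// NOTE: ops without a structured kernel (or overloads such as
// remainder.Scalar_Tensor above) get a plain wrapper in an anonymous
// namespace that forwards straight to the at::native:: implementation;
// the generated comments record that the device check and DeviceGuard
// were intentionally omitted for these CPU entries.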
18998namespace {
18999at::Tensor wrapper_CPU__min(const at::Tensor & self) {
19000 // No device check
19001 // DeviceGuard omitted
19002 return at::native::min(self);
19003}
19004} // anonymous namespace
19005struct structured_fmin_out_functional final : public at::native::structured_fmin_out {
19006 void set_output_strided(
19007 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19008 TensorOptions options, DimnameList names
19009 ) override {
19010 outputs_[output_idx] = create_out(sizes, strides, options);
19011 if (!names.empty()) {
19012 namedinference::propagate_names(*outputs_[output_idx], names);
19013 }
19014 // super must happen after, so that downstream can use maybe_get_output
19015 // to retrieve the output
19016 at::native::structured_fmin_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
19017 }
19018 void set_output_raw_strided(
19019 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19020 TensorOptions options, DimnameList names
19021 ) override {
19022 outputs_[output_idx] = create_out(sizes, strides, options);
19023 if (!names.empty()) {
19024 namedinference::propagate_names(*outputs_[output_idx], names);
19025 }
19026 // super must happen after, so that downstream can use maybe_get_output
19027 // to retrieve the output
19028 at::native::structured_fmin_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
19029 }
19030 const Tensor& maybe_get_output(int64_t output_idx) override {
19031 return *outputs_[output_idx];
19032 }
19033 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
19034};
19035at::Tensor wrapper_CPU_fmin(const at::Tensor & self, const at::Tensor & other) {
19036structured_fmin_out_functional op;
19037op.meta(self, other);
19038op.impl(self, other, *op.outputs_[0]);
19039return std::move(op.outputs_[0]).take();
19040}
19041struct structured_fmin_out_out final : public at::native::structured_fmin_out {
19042 structured_fmin_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
19043 void set_output_strided(
19044 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19045 TensorOptions options, DimnameList names
19046 ) override {
19047 const auto& out = outputs_[output_idx].get();
19048 resize_out(out, sizes, strides, options);
19049 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
19050 if (C10_UNLIKELY(maybe_proxy.has_value())) {
19051 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
19052 }
19053 if (!names.empty()) {
19054 namedinference::propagate_names(outputs_[output_idx], names);
19055 }
19056 // super must happen after, so that downstream can use maybe_get_output
19057 // to retrieve the output
19058 at::native::structured_fmin_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
19059 }
19060 void set_output_raw_strided(
19061 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19062 TensorOptions options, DimnameList names
19063 ) override {
19064 const auto& out = outputs_[output_idx].get();
19065 resize_out(out, sizes, strides, options);
19066 if (!names.empty()) {
19067 namedinference::propagate_names(outputs_[output_idx], names);
19068 }
19069 // super must happen after, so that downstream can use maybe_get_output
19070 // to retrieve the output
19071 at::native::structured_fmin_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
19072 }
19073 const Tensor& maybe_get_output(int64_t output_idx) override {
19074 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
19075 }
19076 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
19077 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
19078};
19079at::Tensor & wrapper_CPU_fmin_out_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
19080structured_fmin_out_out op(out);
19081op.meta(self, other);
19082op.impl(self, other, op.maybe_get_output(0));
19083if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
19084return out;
19085}
19086namespace {
19087at::Tensor wrapper_CPU__max(const at::Tensor & self) {
19088 // No device check
19089 // DeviceGuard omitted
19090 return at::native::max(self);
19091}
19092} // anonymous namespace
19093namespace {
19094at::Tensor & wrapper_CPU_unary_out_max_out(const at::Tensor & self, at::Tensor & out) {
19095 // No device check
19096 // DeviceGuard omitted
19097 return at::native::max_unary_out(self, out);
19098}
19099} // anonymous namespace
19100struct structured_fmax_out_functional final : public at::native::structured_fmax_out {
19101 void set_output_strided(
19102 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19103 TensorOptions options, DimnameList names
19104 ) override {
19105 outputs_[output_idx] = create_out(sizes, strides, options);
19106 if (!names.empty()) {
19107 namedinference::propagate_names(*outputs_[output_idx], names);
19108 }
19109 // super must happen after, so that downstream can use maybe_get_output
19110 // to retrieve the output
19111 at::native::structured_fmax_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
19112 }
19113 void set_output_raw_strided(
19114 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19115 TensorOptions options, DimnameList names
19116 ) override {
19117 outputs_[output_idx] = create_out(sizes, strides, options);
19118 if (!names.empty()) {
19119 namedinference::propagate_names(*outputs_[output_idx], names);
19120 }
19121 // super must happen after, so that downstream can use maybe_get_output
19122 // to retrieve the output
19123 at::native::structured_fmax_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
19124 }
19125 const Tensor& maybe_get_output(int64_t output_idx) override {
19126 return *outputs_[output_idx];
19127 }
19128 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
19129};
19130at::Tensor wrapper_CPU_fmax(const at::Tensor & self, const at::Tensor & other) {
19131structured_fmax_out_functional op;
19132op.meta(self, other);
19133op.impl(self, other, *op.outputs_[0]);
19134return std::move(op.outputs_[0]).take();
19135}
19136struct structured_fmax_out_out final : public at::native::structured_fmax_out {
19137 structured_fmax_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
19138 void set_output_strided(
19139 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19140 TensorOptions options, DimnameList names
19141 ) override {
19142 const auto& out = outputs_[output_idx].get();
19143 resize_out(out, sizes, strides, options);
19144 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
19145 if (C10_UNLIKELY(maybe_proxy.has_value())) {
19146 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
19147 }
19148 if (!names.empty()) {
19149 namedinference::propagate_names(outputs_[output_idx], names);
19150 }
19151 // super must happen after, so that downstream can use maybe_get_output
19152 // to retrieve the output
19153 at::native::structured_fmax_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
19154 }
19155 void set_output_raw_strided(
19156 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19157 TensorOptions options, DimnameList names
19158 ) override {
19159 const auto& out = outputs_[output_idx].get();
19160 resize_out(out, sizes, strides, options);
19161 if (!names.empty()) {
19162 namedinference::propagate_names(outputs_[output_idx], names);
19163 }
19164 // super must happen after, so that downstream can use maybe_get_output
19165 // to retrieve the output
19166 at::native::structured_fmax_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
19167 }
19168 const Tensor& maybe_get_output(int64_t output_idx) override {
19169 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
19170 }
19171 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
19172 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
19173};
19174at::Tensor & wrapper_CPU_fmax_out_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
19175structured_fmax_out_out op(out);
19176op.meta(self, other);
19177op.impl(self, other, op.maybe_get_output(0));
19178if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
19179return out;
19180}
19181struct structured_maximum_out_functional final : public at::native::structured_maximum_out {
19182 void set_output_strided(
19183 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19184 TensorOptions options, DimnameList names
19185 ) override {
19186 outputs_[output_idx] = create_out(sizes, strides, options);
19187 if (!names.empty()) {
19188 namedinference::propagate_names(*outputs_[output_idx], names);
19189 }
19190 // super must happen after, so that downstream can use maybe_get_output
19191 // to retrieve the output
19192 at::native::structured_maximum_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
19193 }
19194 void set_output_raw_strided(
19195 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19196 TensorOptions options, DimnameList names
19197 ) override {
19198 outputs_[output_idx] = create_out(sizes, strides, options);
19199 if (!names.empty()) {
19200 namedinference::propagate_names(*outputs_[output_idx], names);
19201 }
19202 // super must happen after, so that downstream can use maybe_get_output
19203 // to retrieve the output
19204 at::native::structured_maximum_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
19205 }
19206 const Tensor& maybe_get_output(int64_t output_idx) override {
19207 return *outputs_[output_idx];
19208 }
19209 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
19210};
19211at::Tensor wrapper_CPU_maximum(const at::Tensor & self, const at::Tensor & other) {
19212structured_maximum_out_functional op;
19213op.meta(self, other);
19214op.impl(self, other, *op.outputs_[0]);
19215return std::move(op.outputs_[0]).take();
19216}
19217struct structured_maximum_out_out final : public at::native::structured_maximum_out {
19218 structured_maximum_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
19219 void set_output_strided(
19220 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19221 TensorOptions options, DimnameList names
19222 ) override {
19223 const auto& out = outputs_[output_idx].get();
19224 resize_out(out, sizes, strides, options);
19225 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
19226 if (C10_UNLIKELY(maybe_proxy.has_value())) {
19227 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
19228 }
19229 if (!names.empty()) {
19230 namedinference::propagate_names(outputs_[output_idx], names);
19231 }
19232 // super must happen after, so that downstream can use maybe_get_output
19233 // to retrieve the output
19234 at::native::structured_maximum_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
19235 }
19236 void set_output_raw_strided(
19237 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19238 TensorOptions options, DimnameList names
19239 ) override {
19240 const auto& out = outputs_[output_idx].get();
19241 resize_out(out, sizes, strides, options);
19242 if (!names.empty()) {
19243 namedinference::propagate_names(outputs_[output_idx], names);
19244 }
19245 // super must happen after, so that downstream can use maybe_get_output
19246 // to retrieve the output
19247 at::native::structured_maximum_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
19248 }
19249 const Tensor& maybe_get_output(int64_t output_idx) override {
19250 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
19251 }
19252 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
19253 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
19254};
19255at::Tensor & wrapper_CPU_maximum_out_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
19256structured_maximum_out_out op(out);
19257op.meta(self, other);
19258op.impl(self, other, op.maybe_get_output(0));
19259if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
19260return out;
19261}
19262struct structured_minimum_out_functional final : public at::native::structured_minimum_out {
19263 void set_output_strided(
19264 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19265 TensorOptions options, DimnameList names
19266 ) override {
19267 outputs_[output_idx] = create_out(sizes, strides, options);
19268 if (!names.empty()) {
19269 namedinference::propagate_names(*outputs_[output_idx], names);
19270 }
19271 // super must happen after, so that downstream can use maybe_get_output
19272 // to retrieve the output
19273 at::native::structured_minimum_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
19274 }
19275 void set_output_raw_strided(
19276 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19277 TensorOptions options, DimnameList names
19278 ) override {
19279 outputs_[output_idx] = create_out(sizes, strides, options);
19280 if (!names.empty()) {
19281 namedinference::propagate_names(*outputs_[output_idx], names);
19282 }
19283 // super must happen after, so that downstream can use maybe_get_output
19284 // to retrieve the output
19285 at::native::structured_minimum_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
19286 }
19287 const Tensor& maybe_get_output(int64_t output_idx) override {
19288 return *outputs_[output_idx];
19289 }
19290 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
19291};
19292at::Tensor wrapper_CPU_minimum(const at::Tensor & self, const at::Tensor & other) {
19293structured_minimum_out_functional op;
19294op.meta(self, other);
19295op.impl(self, other, *op.outputs_[0]);
19296return std::move(op.outputs_[0]).take();
19297}
19298struct structured_minimum_out_out final : public at::native::structured_minimum_out {
19299 structured_minimum_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
19300 void set_output_strided(
19301 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19302 TensorOptions options, DimnameList names
19303 ) override {
19304 const auto& out = outputs_[output_idx].get();
19305 resize_out(out, sizes, strides, options);
19306 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
19307 if (C10_UNLIKELY(maybe_proxy.has_value())) {
19308 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
19309 }
19310 if (!names.empty()) {
19311 namedinference::propagate_names(outputs_[output_idx], names);
19312 }
19313 // super must happen after, so that downstream can use maybe_get_output
19314 // to retrieve the output
19315 at::native::structured_minimum_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
19316 }
19317 void set_output_raw_strided(
19318 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19319 TensorOptions options, DimnameList names
19320 ) override {
19321 const auto& out = outputs_[output_idx].get();
19322 resize_out(out, sizes, strides, options);
19323 if (!names.empty()) {
19324 namedinference::propagate_names(outputs_[output_idx], names);
19325 }
19326 // super must happen after, so that downstream can use maybe_get_output
19327 // to retrieve the output
19328 at::native::structured_minimum_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
19329 }
19330 const Tensor& maybe_get_output(int64_t output_idx) override {
19331 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
19332 }
19333 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
19334 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
19335};
19336at::Tensor & wrapper_CPU_minimum_out_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
19337structured_minimum_out_out op(out);
19338op.meta(self, other);
19339op.impl(self, other, op.maybe_get_output(0));
19340if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
19341return out;
19342}
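// NOTE: fmin, fmax, maximum and minimum above are structurally identical
// binary elementwise wrappers; they differ only in which
// at::native::structured_*_out base class they derive from, and therefore
// in which meta()/impl() pair ends up being called.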
19343struct structured_sort_stable_out_functional final : public at::native::structured_sort_stable_out {
19344 void set_output_strided(
19345 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19346 TensorOptions options, DimnameList names
19347 ) override {
19348 outputs_[output_idx] = create_out(sizes, strides, options);
19349 if (!names.empty()) {
19350 namedinference::propagate_names(*outputs_[output_idx], names);
19351 }
19352 // super must happen after, so that downstream can use maybe_get_output
19353 // to retrieve the output
19354 }
19355 void set_output_raw_strided(
19356 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19357 TensorOptions options, DimnameList names
19358 ) override {
19359 outputs_[output_idx] = create_out(sizes, strides, options);
19360 if (!names.empty()) {
19361 namedinference::propagate_names(*outputs_[output_idx], names);
19362 }
19363 // super must happen after, so that downstream can use maybe_get_output
19364 // to retrieve the output
19365 }
19366 const Tensor& maybe_get_output(int64_t output_idx) override {
19367 return *outputs_[output_idx];
19368 }
19369 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
19370};
19371::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_sort_stable(const at::Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending) {
19372structured_sort_stable_out_functional op;
19373op.meta(self, stable, dim, descending);
19374op.impl(self, stable, dim, descending, *op.outputs_[0], *op.outputs_[1]);
19375return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
19376}
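// NOTE: sort.stable is a two-output structured op, so the generated arrays
// (outputs_, and proxy_outputs_ in the out= variant) have size 2 and the
// functional wrapper returns both tensors as a tuple. For this op (and for
// topk, all, any, renorm and pow.Scalar below) no base-class
// set_output_raw_strided call follows the "super must happen after" comment,
// presumably because their meta classes do not define one to forward to.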
19377struct structured_sort_stable_out_out final : public at::native::structured_sort_stable_out {
19378 structured_sort_stable_out_out(Tensor& out0, Tensor& out1) : outputs_{ std::ref(out0), std::ref(out1) } {}
19379 void set_output_strided(
19380 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19381 TensorOptions options, DimnameList names
19382 ) override {
19383 const auto& out = outputs_[output_idx].get();
19384 resize_out(out, sizes, strides, options);
19385 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
19386 if (C10_UNLIKELY(maybe_proxy.has_value())) {
19387 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
19388 }
19389 if (!names.empty()) {
19390 namedinference::propagate_names(outputs_[output_idx], names);
19391 }
19392 // super must happen after, so that downstream can use maybe_get_output
19393 // to retrieve the output
19394 }
19395 void set_output_raw_strided(
19396 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19397 TensorOptions options, DimnameList names
19398 ) override {
19399 const auto& out = outputs_[output_idx].get();
19400 resize_out(out, sizes, strides, options);
19401 if (!names.empty()) {
19402 namedinference::propagate_names(outputs_[output_idx], names);
19403 }
19404 // super must happen after, so that downstream can use maybe_get_output
19405 // to retrieve the output
19406 }
19407 const Tensor& maybe_get_output(int64_t output_idx) override {
19408 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
19409 }
19410 std::array<std::reference_wrapper<Tensor>, 2> outputs_;
19411 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 2> proxy_outputs_;
19412};
19413::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_sort_out_values_stable(const at::Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending, at::Tensor & values, at::Tensor & indices) {
19414structured_sort_stable_out_out op(values, indices);
19415op.meta(self, stable, dim, descending);
19416op.impl(self, stable, dim, descending, op.maybe_get_output(0), op.maybe_get_output(1));
19417if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
19418if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(**op.proxy_outputs_[1]);
19419return std::forward_as_tuple(values, indices);
19420}
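// NOTE: for multi-output out= overloads each output slot gets its own proxy
// and its own copy-back, and the wrapper returns the caller's tensors by
// reference via std::forward_as_tuple.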
19421namespace {
19422at::Tensor wrapper_CPU_stable_argsort(const at::Tensor & self, bool stable, int64_t dim, bool descending) {
19423 // No device check
19424 // DeviceGuard omitted
19425 return at::native::argsort_stable(self, stable, dim, descending);
19426}
19427} // anonymous namespace
19428struct structured_topk_out_cpu_functional final : public at::native::structured_topk_out_cpu {
19429 void set_output_strided(
19430 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19431 TensorOptions options, DimnameList names
19432 ) override {
19433 outputs_[output_idx] = create_out(sizes, strides, options);
19434 if (!names.empty()) {
19435 namedinference::propagate_names(*outputs_[output_idx], names);
19436 }
19437 // super must happen after, so that downstream can use maybe_get_output
19438 // to retrieve the output
19439 }
19440 void set_output_raw_strided(
19441 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19442 TensorOptions options, DimnameList names
19443 ) override {
19444 outputs_[output_idx] = create_out(sizes, strides, options);
19445 if (!names.empty()) {
19446 namedinference::propagate_names(*outputs_[output_idx], names);
19447 }
19448 // super must happen after, so that downstream can use maybe_get_output
19449 // to retrieve the output
19450 }
19451 const Tensor& maybe_get_output(int64_t output_idx) override {
19452 return *outputs_[output_idx];
19453 }
19454 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
19455};
19456::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_topk(const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) {
19457structured_topk_out_cpu_functional op;
19458op.meta(self, k, dim, largest, sorted);
19459op.impl(self, k, dim, largest, sorted, *op.outputs_[0], *op.outputs_[1]);
19460return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
19461}
19462struct structured_topk_out_cpu_out final : public at::native::structured_topk_out_cpu {
19463 structured_topk_out_cpu_out(Tensor& out0, Tensor& out1) : outputs_{ std::ref(out0), std::ref(out1) } {}
19464 void set_output_strided(
19465 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19466 TensorOptions options, DimnameList names
19467 ) override {
19468 const auto& out = outputs_[output_idx].get();
19469 resize_out(out, sizes, strides, options);
19470 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
19471 if (C10_UNLIKELY(maybe_proxy.has_value())) {
19472 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
19473 }
19474 if (!names.empty()) {
19475 namedinference::propagate_names(outputs_[output_idx], names);
19476 }
19477 // super must happen after, so that downstream can use maybe_get_output
19478 // to retrieve the output
19479 }
19480 void set_output_raw_strided(
19481 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19482 TensorOptions options, DimnameList names
19483 ) override {
19484 const auto& out = outputs_[output_idx].get();
19485 resize_out(out, sizes, strides, options);
19486 if (!names.empty()) {
19487 namedinference::propagate_names(outputs_[output_idx], names);
19488 }
19489 // super must happen after, so that downstream can use maybe_get_output
19490 // to retrieve the output
19491 }
19492 const Tensor& maybe_get_output(int64_t output_idx) override {
19493 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
19494 }
19495 std::array<std::reference_wrapper<Tensor>, 2> outputs_;
19496 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 2> proxy_outputs_;
19497};
19498::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_topk_out_values(const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted, at::Tensor & values, at::Tensor & indices) {
19499structured_topk_out_cpu_out op(values, indices);
19500op.meta(self, k, dim, largest, sorted);
19501op.impl(self, k, dim, largest, sorted, op.maybe_get_output(0), op.maybe_get_output(1));
19502if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
19503if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(**op.proxy_outputs_[1]);
19504return std::forward_as_tuple(values, indices);
19505}
19506struct structured_all_all_out_functional final : public at::native::structured_all_all_out {
19507 void set_output_strided(
19508 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19509 TensorOptions options, DimnameList names
19510 ) override {
19511 outputs_[output_idx] = create_out(sizes, strides, options);
19512 if (!names.empty()) {
19513 namedinference::propagate_names(*outputs_[output_idx], names);
19514 }
19515 // super must happen after, so that downstream can use maybe_get_output
19516 // to retrieve the output
19517 }
19518 void set_output_raw_strided(
19519 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19520 TensorOptions options, DimnameList names
19521 ) override {
19522 outputs_[output_idx] = create_out(sizes, strides, options);
19523 if (!names.empty()) {
19524 namedinference::propagate_names(*outputs_[output_idx], names);
19525 }
19526 // super must happen after, so that downstream can use maybe_get_output
19527 // to retrieve the output
19528 }
19529 const Tensor& maybe_get_output(int64_t output_idx) override {
19530 return *outputs_[output_idx];
19531 }
19532 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
19533};
19534at::Tensor wrapper_CPU_all(const at::Tensor & self) {
19535structured_all_all_out_functional op;
19536op.meta(self);
19537op.impl(self, *op.outputs_[0]);
19538return std::move(op.outputs_[0]).take();
19539}
19540struct structured_all_all_out_out final : public at::native::structured_all_all_out {
19541 structured_all_all_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
19542 void set_output_strided(
19543 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19544 TensorOptions options, DimnameList names
19545 ) override {
19546 const auto& out = outputs_[output_idx].get();
19547 resize_out(out, sizes, strides, options);
19548 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
19549 if (C10_UNLIKELY(maybe_proxy.has_value())) {
19550 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
19551 }
19552 if (!names.empty()) {
19553 namedinference::propagate_names(outputs_[output_idx], names);
19554 }
19555 // super must happen after, so that downstream can use maybe_get_output
19556 // to retrieve the output
19557 }
19558 void set_output_raw_strided(
19559 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19560 TensorOptions options, DimnameList names
19561 ) override {
19562 const auto& out = outputs_[output_idx].get();
19563 resize_out(out, sizes, strides, options);
19564 if (!names.empty()) {
19565 namedinference::propagate_names(outputs_[output_idx], names);
19566 }
19567 // super must happen after, so that downstream can use maybe_get_output
19568 // to retrieve the output
19569 }
19570 const Tensor& maybe_get_output(int64_t output_idx) override {
19571 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
19572 }
19573 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
19574 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
19575};
19576at::Tensor & wrapper_CPU_all_out_all_out(const at::Tensor & self, at::Tensor & out) {
19577structured_all_all_out_out op(out);
19578op.meta(self);
19579op.impl(self, op.maybe_get_output(0));
19580if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
19581return out;
19582}
19583struct structured_any_all_out_functional final : public at::native::structured_any_all_out {
19584 void set_output_strided(
19585 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19586 TensorOptions options, DimnameList names
19587 ) override {
19588 outputs_[output_idx] = create_out(sizes, strides, options);
19589 if (!names.empty()) {
19590 namedinference::propagate_names(*outputs_[output_idx], names);
19591 }
19592 // super must happen after, so that downstream can use maybe_get_output
19593 // to retrieve the output
19594 }
19595 void set_output_raw_strided(
19596 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19597 TensorOptions options, DimnameList names
19598 ) override {
19599 outputs_[output_idx] = create_out(sizes, strides, options);
19600 if (!names.empty()) {
19601 namedinference::propagate_names(*outputs_[output_idx], names);
19602 }
19603 // super must happen after, so that downstream can use maybe_get_output
19604 // to retrieve the output
19605 }
19606 const Tensor& maybe_get_output(int64_t output_idx) override {
19607 return *outputs_[output_idx];
19608 }
19609 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
19610};
19611at::Tensor wrapper_CPU_any(const at::Tensor & self) {
19612structured_any_all_out_functional op;
19613op.meta(self);
19614op.impl(self, *op.outputs_[0]);
19615return std::move(op.outputs_[0]).take();
19616}
19617struct structured_any_all_out_out final : public at::native::structured_any_all_out {
19618 structured_any_all_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
19619 void set_output_strided(
19620 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19621 TensorOptions options, DimnameList names
19622 ) override {
19623 const auto& out = outputs_[output_idx].get();
19624 resize_out(out, sizes, strides, options);
19625 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
19626 if (C10_UNLIKELY(maybe_proxy.has_value())) {
19627 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
19628 }
19629 if (!names.empty()) {
19630 namedinference::propagate_names(outputs_[output_idx], names);
19631 }
19632 // super must happen after, so that downstream can use maybe_get_output
19633 // to retrieve the output
19634 }
19635 void set_output_raw_strided(
19636 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19637 TensorOptions options, DimnameList names
19638 ) override {
19639 const auto& out = outputs_[output_idx].get();
19640 resize_out(out, sizes, strides, options);
19641 if (!names.empty()) {
19642 namedinference::propagate_names(outputs_[output_idx], names);
19643 }
19644 // super must happen after, so that downstream can use maybe_get_output
19645 // to retrieve the output
19646 }
19647 const Tensor& maybe_get_output(int64_t output_idx) override {
19648 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
19649 }
19650 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
19651 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
19652};
19653at::Tensor & wrapper_CPU_any_out_all_out(const at::Tensor & self, at::Tensor & out) {
19654structured_any_all_out_out op(out);
19655op.meta(self);
19656op.impl(self, op.maybe_get_output(0));
19657if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
19658return out;
19659}
19660struct structured_renorm_out_functional final : public at::native::structured_renorm_out {
19661 void set_output_strided(
19662 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19663 TensorOptions options, DimnameList names
19664 ) override {
19665 outputs_[output_idx] = create_out(sizes, strides, options);
19666 if (!names.empty()) {
19667 namedinference::propagate_names(*outputs_[output_idx], names);
19668 }
19669 // super must happen after, so that downstream can use maybe_get_output
19670 // to retrieve the output
19671 }
19672 void set_output_raw_strided(
19673 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19674 TensorOptions options, DimnameList names
19675 ) override {
19676 outputs_[output_idx] = create_out(sizes, strides, options);
19677 if (!names.empty()) {
19678 namedinference::propagate_names(*outputs_[output_idx], names);
19679 }
19680 // super must happen after, so that downstream can use maybe_get_output
19681 // to retrieve the output
19682 }
19683 const Tensor& maybe_get_output(int64_t output_idx) override {
19684 return *outputs_[output_idx];
19685 }
19686 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
19687};
19688at::Tensor wrapper_CPU_renorm(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
19689structured_renorm_out_functional op;
19690op.meta(self, p, dim, maxnorm);
19691op.impl(self, p, dim, maxnorm, *op.outputs_[0]);
19692return std::move(op.outputs_[0]).take();
19693}
19694struct structured_renorm_out_out final : public at::native::structured_renorm_out {
19695 structured_renorm_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
19696 void set_output_strided(
19697 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19698 TensorOptions options, DimnameList names
19699 ) override {
19700 const auto& out = outputs_[output_idx].get();
19701 resize_out(out, sizes, strides, options);
19702 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
19703 if (C10_UNLIKELY(maybe_proxy.has_value())) {
19704 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
19705 }
19706 if (!names.empty()) {
19707 namedinference::propagate_names(outputs_[output_idx], names);
19708 }
19709 // super must happen after, so that downstream can use maybe_get_output
19710 // to retrieve the output
19711 }
19712 void set_output_raw_strided(
19713 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19714 TensorOptions options, DimnameList names
19715 ) override {
19716 const auto& out = outputs_[output_idx].get();
19717 resize_out(out, sizes, strides, options);
19718 if (!names.empty()) {
19719 namedinference::propagate_names(outputs_[output_idx], names);
19720 }
19721 // super must happen after, so that downstream can use maybe_get_output
19722 // to retrieve the output
19723 }
19724 const Tensor& maybe_get_output(int64_t output_idx) override {
19725 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
19726 }
19727 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
19728 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
19729};
19730at::Tensor & wrapper_CPU_renorm_out_out(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm, at::Tensor & out) {
19731structured_renorm_out_out op(out);
19732op.meta(self, p, dim, maxnorm);
19733op.impl(self, p, dim, maxnorm, op.maybe_get_output(0));
19734if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
19735return out;
19736}
19737struct structured_renorm_out_inplace final : public at::native::structured_renorm_out {
19738 structured_renorm_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
19739 void set_output_strided(
19740 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19741 TensorOptions options, DimnameList names
19742 ) override {
19743 const auto& out = outputs_[output_idx].get();
19744 check_inplace(out, sizes, options);
19745 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
19746 if (C10_UNLIKELY(maybe_proxy.has_value())) {
19747 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
19748 }
19749 if (!names.empty()) {
19750 namedinference::propagate_names(outputs_[output_idx], names);
19751 }
19752 // super must happen after, so that downstream can use maybe_get_output
19753 // to retrieve the output
19754 }
19755 void set_output_raw_strided(
19756 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19757 TensorOptions options, DimnameList names
19758 ) override {
19759 const auto& out = outputs_[output_idx].get();
19760 check_inplace(out, sizes, options);
19761 if (!names.empty()) {
19762 namedinference::propagate_names(outputs_[output_idx], names);
19763 }
19764 // super must happen after, so that downstream can use maybe_get_output
19765 // to retrieve the output
19766 }
19767 const Tensor& maybe_get_output(int64_t output_idx) override {
19768 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
19769 }
19770 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
19771 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
19772};
19773at::Tensor & wrapper_CPU_renorm_(at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
19774structured_renorm_out_inplace op(self);
19775op.meta(self, p, dim, maxnorm);
19776op.impl(self, p, dim, maxnorm, op.outputs_[0]);
19777if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
19778return self;
19779}
19780namespace {
19781at::Tensor wrapper_CPU__unfold(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
19782 // No device check
19783 // DeviceGuard omitted
19784 return at::native::unfold(self, dimension, size, step);
19785}
19786} // anonymous namespace
19787namespace {
19788at::Tensor wrapper_CPU__unfold_backward(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
19789 // No device check
19790 // DeviceGuard omitted
19791 return at::native::unfold_backward(grad_in, C10_AS_INTARRAYREF_SLOW(input_sizes), dim, size, step);
19792}
19793} // anonymous namespace
19794namespace {
19795bool wrapper_CPU__equal(const at::Tensor & self, const at::Tensor & other) {
19796 // No device check
19797 // DeviceGuard omitted
19798 return at::native::cpu_equal(self, other);
19799}
19800} // anonymous namespace
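// NOTE: `equal` returns a bool rather than a Tensor, so it bypasses the
// structured-output machinery entirely and simply forwards to
// at::native::cpu_equal.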
19801struct structured_pow_Tensor_Tensor_out_functional final : public at::native::structured_pow_Tensor_Tensor_out {
19802 void set_output_strided(
19803 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19804 TensorOptions options, DimnameList names
19805 ) override {
19806 outputs_[output_idx] = create_out(sizes, strides, options);
19807 if (!names.empty()) {
19808 namedinference::propagate_names(*outputs_[output_idx], names);
19809 }
19810 // super must happen after, so that downstream can use maybe_get_output
19811 // to retrieve the output
19812 at::native::structured_pow_Tensor_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
19813 }
19814 void set_output_raw_strided(
19815 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19816 TensorOptions options, DimnameList names
19817 ) override {
19818 outputs_[output_idx] = create_out(sizes, strides, options);
19819 if (!names.empty()) {
19820 namedinference::propagate_names(*outputs_[output_idx], names);
19821 }
19822 // super must happen after, so that downstream can use maybe_get_output
19823 // to retrieve the output
19824 at::native::structured_pow_Tensor_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
19825 }
19826 const Tensor& maybe_get_output(int64_t output_idx) override {
19827 return *outputs_[output_idx];
19828 }
19829 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
19830};
19831at::Tensor wrapper_CPU_pow_Tensor_Tensor(const at::Tensor & self, const at::Tensor & exponent) {
19832structured_pow_Tensor_Tensor_out_functional op;
19833op.meta(self, exponent);
19834op.impl(self, exponent, *op.outputs_[0]);
19835return std::move(op.outputs_[0]).take();
19836}
19837struct structured_pow_Tensor_Tensor_out_out final : public at::native::structured_pow_Tensor_Tensor_out {
19838 structured_pow_Tensor_Tensor_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
19839 void set_output_strided(
19840 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19841 TensorOptions options, DimnameList names
19842 ) override {
19843 const auto& out = outputs_[output_idx].get();
19844 resize_out(out, sizes, strides, options);
19845 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
19846 if (C10_UNLIKELY(maybe_proxy.has_value())) {
19847 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
19848 }
19849 if (!names.empty()) {
19850 namedinference::propagate_names(outputs_[output_idx], names);
19851 }
19852 // super must happen after, so that downstream can use maybe_get_output
19853 // to retrieve the output
19854 at::native::structured_pow_Tensor_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
19855 }
19856 void set_output_raw_strided(
19857 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19858 TensorOptions options, DimnameList names
19859 ) override {
19860 const auto& out = outputs_[output_idx].get();
19861 resize_out(out, sizes, strides, options);
19862 if (!names.empty()) {
19863 namedinference::propagate_names(outputs_[output_idx], names);
19864 }
19865 // super must happen after, so that downstream can use maybe_get_output
19866 // to retrieve the output
19867 at::native::structured_pow_Tensor_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
19868 }
19869 const Tensor& maybe_get_output(int64_t output_idx) override {
19870 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
19871 }
19872 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
19873 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
19874};
19875at::Tensor & wrapper_CPU_pow_out_Tensor_Tensor_out(const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out) {
19876structured_pow_Tensor_Tensor_out_out op(out);
19877op.meta(self, exponent);
19878op.impl(self, exponent, op.maybe_get_output(0));
19879if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
19880return out;
19881}
19882struct structured_pow_Tensor_Tensor_out_inplace final : public at::native::structured_pow_Tensor_Tensor_out {
19883 structured_pow_Tensor_Tensor_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
19884 void set_output_strided(
19885 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19886 TensorOptions options, DimnameList names
19887 ) override {
19888 const auto& out = outputs_[output_idx].get();
19889 check_inplace(out, sizes, options);
19890 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
19891 if (C10_UNLIKELY(maybe_proxy.has_value())) {
19892 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
19893 }
19894 if (!names.empty()) {
19895 namedinference::propagate_names(outputs_[output_idx], names);
19896 }
19897 // super must happen after, so that downstream can use maybe_get_output
19898 // to retrieve the output
19899 at::native::structured_pow_Tensor_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
19900 }
19901 void set_output_raw_strided(
19902 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19903 TensorOptions options, DimnameList names
19904 ) override {
19905 const auto& out = outputs_[output_idx].get();
19906 check_inplace(out, sizes, options);
19907 if (!names.empty()) {
19908 namedinference::propagate_names(outputs_[output_idx], names);
19909 }
19910 // super must happen after, so that downstream can use maybe_get_output
19911 // to retrieve the output
19912 at::native::structured_pow_Tensor_Tensor_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
19913 }
19914 const Tensor& maybe_get_output(int64_t output_idx) override {
19915 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
19916 }
19917 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
19918 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
19919};
19920at::Tensor & wrapper_CPU_pow__Tensor(at::Tensor & self, const at::Tensor & exponent) {
19921structured_pow_Tensor_Tensor_out_inplace op(self);
19922op.meta(self, exponent);
19923op.impl(self, exponent, op.outputs_[0]);
19924if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
19925return self;
19926}
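// Note: unlike the Tensor-Tensor variant above, the pow.Scalar wrapper classes below emit no
// parent set_output_raw_strided() call after propagating names; presumably this overload's meta
// function manages its output directly rather than inheriting from TensorIteratorBase.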
19927struct structured_pow_Scalar_out_functional final : public at::native::structured_pow_Scalar_out {
19928 void set_output_strided(
19929 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19930 TensorOptions options, DimnameList names
19931 ) override {
19932 outputs_[output_idx] = create_out(sizes, strides, options);
19933 if (!names.empty()) {
19934 namedinference::propagate_names(*outputs_[output_idx], names);
19935 }
19936 // super must happen after, so that downstream can use maybe_get_output
19937 // to retrieve the output
19938 }
19939 void set_output_raw_strided(
19940 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19941 TensorOptions options, DimnameList names
19942 ) override {
19943 outputs_[output_idx] = create_out(sizes, strides, options);
19944 if (!names.empty()) {
19945 namedinference::propagate_names(*outputs_[output_idx], names);
19946 }
19947 // super must happen after, so that downstream can use maybe_get_output
19948 // to retrieve the output
19949 }
19950 const Tensor& maybe_get_output(int64_t output_idx) override {
19951 return *outputs_[output_idx];
19952 }
19953 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
19954};
19955at::Tensor wrapper_CPU_pow_Scalar(const at::Scalar & self, const at::Tensor & exponent) {
19956structured_pow_Scalar_out_functional op;
19957op.meta(self, exponent);
19958op.impl(self, exponent, *op.outputs_[0]);
19959return std::move(op.outputs_[0]).take();
19960}
19961struct structured_pow_Scalar_out_out final : public at::native::structured_pow_Scalar_out {
19962 structured_pow_Scalar_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
19963 void set_output_strided(
19964 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19965 TensorOptions options, DimnameList names
19966 ) override {
19967 const auto& out = outputs_[output_idx].get();
19968 resize_out(out, sizes, strides, options);
19969 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
19970 if (C10_UNLIKELY(maybe_proxy.has_value())) {
19971 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
19972 }
19973 if (!names.empty()) {
19974 namedinference::propagate_names(outputs_[output_idx], names);
19975 }
19976 // super must happen after, so that downstream can use maybe_get_output
19977 // to retrieve the output
19978 }
19979 void set_output_raw_strided(
19980 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
19981 TensorOptions options, DimnameList names
19982 ) override {
19983 const auto& out = outputs_[output_idx].get();
19984 resize_out(out, sizes, strides, options);
19985 if (!names.empty()) {
19986 namedinference::propagate_names(outputs_[output_idx], names);
19987 }
19988 // super must happen after, so that downstream can use maybe_get_output
19989 // to retrieve the output
19990 }
19991 const Tensor& maybe_get_output(int64_t output_idx) override {
19992 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
19993 }
19994 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
19995 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
19996};
19997at::Tensor & wrapper_CPU_pow_out_Scalar_out(const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out) {
19998structured_pow_Scalar_out_out op(out);
19999op.meta(self, exponent);
20000op.impl(self, exponent, op.maybe_get_output(0));
20001if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
20002return out;
20003}
20004struct structured_pow_Tensor_Scalar_out_functional final : public at::native::structured_pow_Tensor_Scalar_out {
20005 void set_output_strided(
20006 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20007 TensorOptions options, DimnameList names
20008 ) override {
20009 outputs_[output_idx] = create_out(sizes, strides, options);
20010 if (!names.empty()) {
20011 namedinference::propagate_names(*outputs_[output_idx], names);
20012 }
20013 // super must happen after, so that downstream can use maybe_get_output
20014 // to retrieve the output
20015 at::native::structured_pow_Tensor_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
20016 }
20017 void set_output_raw_strided(
20018 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20019 TensorOptions options, DimnameList names
20020 ) override {
20021 outputs_[output_idx] = create_out(sizes, strides, options);
20022 if (!names.empty()) {
20023 namedinference::propagate_names(*outputs_[output_idx], names);
20024 }
20025 // super must happen after, so that downstream can use maybe_get_output
20026 // to retrieve the output
20027 at::native::structured_pow_Tensor_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
20028 }
20029 const Tensor& maybe_get_output(int64_t output_idx) override {
20030 return *outputs_[output_idx];
20031 }
20032 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
20033};
20034at::Tensor wrapper_CPU_pow_Tensor_Scalar(const at::Tensor & self, const at::Scalar & exponent) {
20035structured_pow_Tensor_Scalar_out_functional op;
20036op.meta(self, exponent);
20037op.impl(self, exponent, *op.outputs_[0]);
20038return std::move(op.outputs_[0]).take();
20039}
20040struct structured_pow_Tensor_Scalar_out_out final : public at::native::structured_pow_Tensor_Scalar_out {
20041 structured_pow_Tensor_Scalar_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
20042 void set_output_strided(
20043 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20044 TensorOptions options, DimnameList names
20045 ) override {
20046 const auto& out = outputs_[output_idx].get();
20047 resize_out(out, sizes, strides, options);
20048 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
20049 if (C10_UNLIKELY(maybe_proxy.has_value())) {
20050 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
20051 }
20052 if (!names.empty()) {
20053 namedinference::propagate_names(outputs_[output_idx], names);
20054 }
20055 // super must happen after, so that downstream can use maybe_get_output
20056 // to retrieve the output
20057 at::native::structured_pow_Tensor_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
20058 }
20059 void set_output_raw_strided(
20060 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20061 TensorOptions options, DimnameList names
20062 ) override {
20063 const auto& out = outputs_[output_idx].get();
20064 resize_out(out, sizes, strides, options);
20065 if (!names.empty()) {
20066 namedinference::propagate_names(outputs_[output_idx], names);
20067 }
20068 // super must happen after, so that downstream can use maybe_get_output
20069 // to retrieve the output
20070 at::native::structured_pow_Tensor_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
20071 }
20072 const Tensor& maybe_get_output(int64_t output_idx) override {
20073 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
20074 }
20075 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
20076 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
20077};
20078at::Tensor & wrapper_CPU_pow_out_Tensor_Scalar_out(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) {
20079structured_pow_Tensor_Scalar_out_out op(out);
20080op.meta(self, exponent);
20081op.impl(self, exponent, op.maybe_get_output(0));
20082if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
20083return out;
20084}
20085struct structured_pow_Tensor_Scalar_out_inplace final : public at::native::structured_pow_Tensor_Scalar_out {
20086 structured_pow_Tensor_Scalar_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
20087 void set_output_strided(
20088 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20089 TensorOptions options, DimnameList names
20090 ) override {
20091 const auto& out = outputs_[output_idx].get();
20092 check_inplace(out, sizes, options);
20093 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
20094 if (C10_UNLIKELY(maybe_proxy.has_value())) {
20095 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
20096 }
20097 if (!names.empty()) {
20098 namedinference::propagate_names(outputs_[output_idx], names);
20099 }
20100 // super must happen after, so that downstream can use maybe_get_output
20101 // to retrieve the output
20102 at::native::structured_pow_Tensor_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
20103 }
20104 void set_output_raw_strided(
20105 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
20106 TensorOptions options, DimnameList names
20107 ) override {
20108 const auto& out = outputs_[output_idx].get();
20109 check_inplace(out, sizes, options);
20110 if (!names.empty()) {
20111 namedinference::propagate_names(outputs_[output_idx], names);
20112 }
20113 // super must happen after, so that downstream can use maybe_get_output
20114 // to retrieve the output
20115 at::native::structured_pow_Tensor_Scalar_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
20116 }
20117 const Tensor& maybe_get_output(int64_t output_idx) override {
20118 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
20119 }
20120 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
20121 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
20122};
20123at::Tensor & wrapper_CPU_pow__Scalar(at::Tensor & self, const at::Scalar & exponent) {
20124structured_pow_Tensor_Scalar_out_inplace op(self);
20125op.meta(self, exponent);
20126op.impl(self, exponent, op.outputs_[0]);
20127if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
20128return self;
20129}
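// The wrappers below are for non-structured kernels: each anonymous-namespace wrapper forwards
// directly to the corresponding at::native implementation. Device checks and DeviceGuard setup
// are intentionally omitted, as noted inside each wrapper.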
20130namespace {
20131at::Tensor & wrapper_CPU__normal_(at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
20132 // No device check
20133 // DeviceGuard omitted
20134 return at::native::normal_(self, mean, std, generator);
20135}
20136} // anonymous namespace
20137namespace {
20138at::Tensor wrapper_CPU_Tensor_float_normal(const at::Tensor & mean, double std, c10::optional<at::Generator> generator) {
20139 // No device check
20140 // DeviceGuard omitted
20141 return at::native::normal(mean, std, generator);
20142}
20143} // anonymous namespace
20144namespace {
20145at::Tensor & wrapper_CPU_Tensor_float_out_normal_out(const at::Tensor & mean, double std, c10::optional<at::Generator> generator, at::Tensor & out) {
20146 // No device check
20147 // DeviceGuard omitted
20148 return at::native::normal_out(mean, std, generator, out);
20149}
20150} // anonymous namespace
20151namespace {
20152at::Tensor wrapper_CPU_float_Tensor_normal(double mean, const at::Tensor & std, c10::optional<at::Generator> generator) {
20153 // No device check
20154 // DeviceGuard omitted
20155 return at::native::normal(mean, std, generator);
20156}
20157} // anonymous namespace
20158namespace {
20159at::Tensor & wrapper_CPU_float_Tensor_out_normal_out(double mean, const at::Tensor & std, c10::optional<at::Generator> generator, at::Tensor & out) {
20160 // No device check
20161 // DeviceGuard omitted
20162 return at::native::normal_out(mean, std, generator, out);
20163}
20164} // anonymous namespace
20165namespace {
20166at::Tensor wrapper_CPU_Tensor_Tensor_normal(const at::Tensor & mean, const at::Tensor & std, c10::optional<at::Generator> generator) {
20167 // No device check
20168 // DeviceGuard omitted
20169 return at::native::normal(mean, std, generator);
20170}
20171} // anonymous namespace
20172namespace {
20173at::Tensor & wrapper_CPU_Tensor_Tensor_out_normal_out(const at::Tensor & mean, const at::Tensor & std, c10::optional<at::Generator> generator, at::Tensor & out) {
20174 // No device check
20175 // DeviceGuard omitted
20176 return at::native::normal_out(mean, std, generator, out);
20177}
20178} // anonymous namespace
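// _foreach_* wrappers: on CPU these all route to the at::native *_slow / *_slow_ reference
// kernels (the non-fused fallback path, which appears to apply the underlying op tensor by
// tensor); the in-place variants mutate the input lists and return void.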
20179namespace {
20180::std::vector<at::Tensor> wrapper_CPU_Scalar__foreach_add(at::TensorList self, const at::Scalar & scalar) {
20181 // No device check
20182 // DeviceGuard omitted
20183 return at::native::foreach_tensor_add_scalar_kernel_slow(self, scalar);
20184}
20185} // anonymous namespace
20186namespace {
20187void wrapper_CPU_Scalar__foreach_add_(at::TensorList self, const at::Scalar & scalar) {
20188 // No device check
20189 // DeviceGuard omitted
20190 return at::native::foreach_tensor_add_scalar_kernel_slow_(self, scalar);
20191}
20192} // anonymous namespace
20193namespace {
20194::std::vector<at::Tensor> wrapper_CPU_Scalar__foreach_sub(at::TensorList self, const at::Scalar & scalar) {
20195 // No device check
20196 // DeviceGuard omitted
20197 return at::native::foreach_tensor_sub_scalar_kernel_slow(self, scalar);
20198}
20199} // anonymous namespace
20200namespace {
20201void wrapper_CPU_Scalar__foreach_sub_(at::TensorList self, const at::Scalar & scalar) {
20202 // No device check
20203 // DeviceGuard omitted
20204 return at::native::foreach_tensor_sub_scalar_kernel_slow_(self, scalar);
20205}
20206} // anonymous namespace
20207namespace {
20208::std::vector<at::Tensor> wrapper_CPU_Scalar__foreach_mul(at::TensorList self, const at::Scalar & scalar) {
20209 // No device check
20210 // DeviceGuard omitted
20211 return at::native::foreach_tensor_mul_scalar_kernel_slow(self, scalar);
20212}
20213} // anonymous namespace
20214namespace {
20215void wrapper_CPU_Scalar__foreach_mul_(at::TensorList self, const at::Scalar & scalar) {
20216 // No device check
20217 // DeviceGuard omitted
20218 return at::native::foreach_tensor_mul_scalar_kernel_slow_(self, scalar);
20219}
20220} // anonymous namespace
20221namespace {
20222::std::vector<at::Tensor> wrapper_CPU_Scalar__foreach_div(at::TensorList self, const at::Scalar & scalar) {
20223 // No device check
20224 // DeviceGuard omitted
20225 return at::native::foreach_tensor_div_scalar_kernel_slow(self, scalar);
20226}
20227} // anonymous namespace
20228namespace {
20229void wrapper_CPU_Scalar__foreach_div_(at::TensorList self, const at::Scalar & scalar) {
20230 // No device check
20231 // DeviceGuard omitted
20232 return at::native::foreach_tensor_div_scalar_kernel_slow_(self, scalar);
20233}
20234} // anonymous namespace
20235namespace {
20236::std::vector<at::Tensor> wrapper_CPU_Scalar__foreach_clamp_min(at::TensorList self, const at::Scalar & scalar) {
20237 // No device check
20238 // DeviceGuard omitted
20239 return at::native::foreach_tensor_clamp_min_scalar_kernel_slow(self, scalar);
20240}
20241} // anonymous namespace
20242namespace {
20243void wrapper_CPU_Scalar__foreach_clamp_min_(at::TensorList self, const at::Scalar & scalar) {
20244 // No device check
20245 // DeviceGuard omitted
20246 return at::native::foreach_tensor_clamp_min_scalar_kernel_slow_(self, scalar);
20247}
20248} // anonymous namespace
20249namespace {
20250::std::vector<at::Tensor> wrapper_CPU_Scalar__foreach_clamp_max(at::TensorList self, const at::Scalar & scalar) {
20251 // No device check
20252 // DeviceGuard omitted
20253 return at::native::foreach_tensor_clamp_max_scalar_kernel_slow(self, scalar);
20254}
20255} // anonymous namespace
20256namespace {
20257void wrapper_CPU_Scalar__foreach_clamp_max_(at::TensorList self, const at::Scalar & scalar) {
20258 // No device check
20259 // DeviceGuard omitted
20260 return at::native::foreach_tensor_clamp_max_scalar_kernel_slow_(self, scalar);
20261}
20262} // anonymous namespace
20263namespace {
20264::std::vector<at::Tensor> wrapper_CPU_Scalar__foreach_maximum(at::TensorList self, const at::Scalar & scalar) {
20265 // No device check
20266 // DeviceGuard omitted
20267 return at::native::foreach_tensor_clamp_min_scalar_kernel_slow(self, scalar);
20268}
20269} // anonymous namespace
20270namespace {
20271void wrapper_CPU_Scalar__foreach_maximum_(at::TensorList self, const at::Scalar & scalar) {
20272 // No device check
20273 // DeviceGuard omitted
20274 return at::native::foreach_tensor_clamp_min_scalar_kernel_slow_(self, scalar);
20275}
20276} // anonymous namespace
20277namespace {
20278::std::vector<at::Tensor> wrapper_CPU_Scalar__foreach_minimum(at::TensorList self, const at::Scalar & scalar) {
20279 // No device check
20280 // DeviceGuard omitted
20281 return at::native::foreach_tensor_clamp_max_scalar_kernel_slow(self, scalar);
20282}
20283} // anonymous namespace
20284namespace {
20285void wrapper_CPU_Scalar__foreach_minimum_(at::TensorList self, const at::Scalar & scalar) {
20286 // No device check
20287 // DeviceGuard omitted
20288 return at::native::foreach_tensor_clamp_max_scalar_kernel_slow_(self, scalar);
20289}
20290} // anonymous namespace
20291namespace {
20292::std::vector<at::Tensor> wrapper_CPU_List__foreach_add(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
20293 // No device check
20294 // DeviceGuard omitted
20295 return at::native::foreach_tensor_add_list_kernel_slow(self, other, alpha);
20296}
20297} // anonymous namespace
20298namespace {
20299void wrapper_CPU_List__foreach_add_(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
20300 // No device check
20301 // DeviceGuard omitted
20302 return at::native::foreach_tensor_add_list_kernel_slow_(self, other, alpha);
20303}
20304} // anonymous namespace
20305namespace {
20306::std::vector<at::Tensor> wrapper_CPU_List__foreach_sub(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
20307 // No device check
20308 // DeviceGuard omitted
20309 return at::native::foreach_tensor_sub_list_kernel_slow(self, other, alpha);
20310}
20311} // anonymous namespace
20312namespace {
20313void wrapper_CPU_List__foreach_sub_(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
20314 // No device check
20315 // DeviceGuard omitted
20316 return at::native::foreach_tensor_sub_list_kernel_slow_(self, other, alpha);
20317}
20318} // anonymous namespace
20319namespace {
20320::std::vector<at::Tensor> wrapper_CPU_List__foreach_mul(at::TensorList self, at::TensorList other) {
20321 // No device check
20322 // DeviceGuard omitted
20323 return at::native::foreach_tensor_mul_list_kernel_slow(self, other);
20324}
20325} // anonymous namespace
20326namespace {
20327void wrapper_CPU_List__foreach_mul_(at::TensorList self, at::TensorList other) {
20328 // No device check
20329 // DeviceGuard omitted
20330 return at::native::foreach_tensor_mul_list_kernel_slow_(self, other);
20331}
20332} // anonymous namespace
20333namespace {
20334::std::vector<at::Tensor> wrapper_CPU_List__foreach_div(at::TensorList self, at::TensorList other) {
20335 // No device check
20336 // DeviceGuard omitted
20337 return at::native::foreach_tensor_div_list_kernel_slow(self, other);
20338}
20339} // anonymous namespace
20340namespace {
20341void wrapper_CPU_List__foreach_div_(at::TensorList self, at::TensorList other) {
20342 // No device check
20343 // DeviceGuard omitted
20344 return at::native::foreach_tensor_div_list_kernel_slow_(self, other);
20345}
20346} // anonymous namespace
20347namespace {
20348::std::vector<at::Tensor> wrapper_CPU_List__foreach_clamp_min(at::TensorList self, at::TensorList other) {
20349 // No device check
20350 // DeviceGuard omitted
20351 return at::native::foreach_tensor_clamp_min_list_kernel_slow(self, other);
20352}
20353} // anonymous namespace
20354namespace {
20355void wrapper_CPU_List__foreach_clamp_min_(at::TensorList self, at::TensorList other) {
20356 // No device check
20357 // DeviceGuard omitted
20358 return at::native::foreach_tensor_clamp_min_list_kernel_slow_(self, other);
20359}
20360} // anonymous namespace
20361namespace {
20362::std::vector<at::Tensor> wrapper_CPU_List__foreach_clamp_max(at::TensorList self, at::TensorList other) {
20363 // No device check
20364 // DeviceGuard omitted
20365 return at::native::foreach_tensor_clamp_max_list_kernel_slow(self, other);
20366}
20367} // anonymous namespace
20368namespace {
20369void wrapper_CPU_List__foreach_clamp_max_(at::TensorList self, at::TensorList other) {
20370 // No device check
20371 // DeviceGuard omitted
20372 return at::native::foreach_tensor_clamp_max_list_kernel_slow_(self, other);
20373}
20374} // anonymous namespace
20375namespace {
20376::std::vector<at::Tensor> wrapper_CPU_List__foreach_maximum(at::TensorList self, at::TensorList other) {
20377 // No device check
20378 // DeviceGuard omitted
20379 return at::native::foreach_tensor_clamp_min_list_kernel_slow(self, other);
20380}
20381} // anonymous namespace
20382namespace {
20383void wrapper_CPU_List__foreach_maximum_(at::TensorList self, at::TensorList other) {
20384 // No device check
20385 // DeviceGuard omitted
20386 return at::native::foreach_tensor_clamp_min_list_kernel_slow_(self, other);
20387}
20388} // anonymous namespace
20389namespace {
20390::std::vector<at::Tensor> wrapper_CPU_List__foreach_minimum(at::TensorList self, at::TensorList other) {
20391 // No device check
20392 // DeviceGuard omitted
20393 return at::native::foreach_tensor_clamp_max_list_kernel_slow(self, other);
20394}
20395} // anonymous namespace
20396namespace {
20397void wrapper_CPU_List__foreach_minimum_(at::TensorList self, at::TensorList other) {
20398 // No device check
20399 // DeviceGuard omitted
20400 return at::native::foreach_tensor_clamp_max_list_kernel_slow_(self, other);
20401}
20402} // anonymous namespace
20403namespace {
20404::std::vector<at::Tensor> wrapper_CPU_ScalarList__foreach_add(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
20405 // No device check
20406 // DeviceGuard omitted
20407 return at::native::foreach_tensor_add_scalarlist_kernel_slow(self, scalars);
20408}
20409} // anonymous namespace
20410namespace {
20411void wrapper_CPU_ScalarList__foreach_add_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
20412 // No device check
20413 // DeviceGuard omitted
20414 return at::native::foreach_tensor_add_scalarlist_kernel_slow_(self, scalars);
20415}
20416} // anonymous namespace
20417namespace {
20418::std::vector<at::Tensor> wrapper_CPU_ScalarList__foreach_sub(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
20419 // No device check
20420 // DeviceGuard omitted
20421 return at::native::foreach_tensor_sub_scalarlist_kernel_slow(self, scalars);
20422}
20423} // anonymous namespace
20424namespace {
20425void wrapper_CPU_ScalarList__foreach_sub_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
20426 // No device check
20427 // DeviceGuard omitted
20428 return at::native::foreach_tensor_sub_scalarlist_kernel_slow_(self, scalars);
20429}
20430} // anonymous namespace
20431namespace {
20432::std::vector<at::Tensor> wrapper_CPU_ScalarList__foreach_div(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
20433 // No device check
20434 // DeviceGuard omitted
20435 return at::native::foreach_tensor_div_scalarlist_kernel_slow(self, scalars);
20436}
20437} // anonymous namespace
20438namespace {
20439void wrapper_CPU_ScalarList__foreach_div_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
20440 // No device check
20441 // DeviceGuard omitted
20442 return at::native::foreach_tensor_div_scalarlist_kernel_slow_(self, scalars);
20443}
20444} // anonymous namespace
20445namespace {
20446::std::vector<at::Tensor> wrapper_CPU_ScalarList__foreach_mul(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
20447 // No device check
20448 // DeviceGuard omitted
20449 return at::native::foreach_tensor_mul_scalarlist_kernel_slow(self, scalars);
20450}
20451} // anonymous namespace
20452namespace {
20453void wrapper_CPU_ScalarList__foreach_mul_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
20454 // No device check
20455 // DeviceGuard omitted
20456 return at::native::foreach_tensor_mul_scalarlist_kernel_slow_(self, scalars);
20457}
20458} // anonymous namespace
20459namespace {
20460::std::vector<at::Tensor> wrapper_CPU_ScalarList__foreach_clamp_min(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
20461 // No device check
20462 // DeviceGuard omitted
20463 return at::native::foreach_tensor_clamp_min_scalarlist_kernel_slow(self, scalars);
20464}
20465} // anonymous namespace
20466namespace {
20467void wrapper_CPU_ScalarList__foreach_clamp_min_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
20468 // No device check
20469 // DeviceGuard omitted
20470 return at::native::foreach_tensor_clamp_min_scalarlist_kernel_slow_(self, scalars);
20471}
20472} // anonymous namespace
20473namespace {
20474::std::vector<at::Tensor> wrapper_CPU_ScalarList__foreach_clamp_max(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
20475 // No device check
20476 // DeviceGuard omitted
20477 return at::native::foreach_tensor_clamp_max_scalarlist_kernel_slow(self, scalars);
20478}
20479} // anonymous namespace
20480namespace {
20481void wrapper_CPU_ScalarList__foreach_clamp_max_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
20482 // No device check
20483 // DeviceGuard omitted
20484 return at::native::foreach_tensor_clamp_max_scalarlist_kernel_slow_(self, scalars);
20485}
20486} // anonymous namespace
20487namespace {
20488::std::vector<at::Tensor> wrapper_CPU_ScalarList__foreach_maximum(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
20489 // No device check
20490 // DeviceGuard omitted
20491 return at::native::foreach_tensor_clamp_min_scalarlist_kernel_slow(self, scalars);
20492}
20493} // anonymous namespace
20494namespace {
20495void wrapper_CPU_ScalarList__foreach_maximum_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
20496 // No device check
20497 // DeviceGuard omitted
20498 return at::native::foreach_tensor_clamp_min_scalarlist_kernel_slow_(self, scalars);
20499}
20500} // anonymous namespace
20501namespace {
20502::std::vector<at::Tensor> wrapper_CPU_ScalarList__foreach_minimum(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
20503 // No device check
20504 // DeviceGuard omitted
20505 return at::native::foreach_tensor_clamp_max_scalarlist_kernel_slow(self, scalars);
20506}
20507} // anonymous namespace
20508namespace {
20509void wrapper_CPU_ScalarList__foreach_minimum_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
20510 // No device check
20511 // DeviceGuard omitted
20512 return at::native::foreach_tensor_clamp_max_scalarlist_kernel_slow_(self, scalars);
20513}
20514} // anonymous namespace
20515namespace {
20516::std::vector<at::Tensor> wrapper_CPU___foreach_exp(at::TensorList self) {
20517 // No device check
20518 // DeviceGuard omitted
20519 return at::native::foreach_tensor_exp_slow(self);
20520}
20521} // anonymous namespace
20522namespace {
20523void wrapper_CPU___foreach_exp_(at::TensorList self) {
20524 // No device check
20525 // DeviceGuard omitted
20526 return at::native::foreach_tensor_exp_slow_(self);
20527}
20528} // anonymous namespace
20529namespace {
20530void wrapper_CPU___foreach_zero_(at::TensorList self) {
20531 // No device check
20532 // DeviceGuard omitted
20533 return at::native::foreach_tensor_zero_slow_(self);
20534}
20535} // anonymous namespace
20536namespace {
20537::std::vector<at::Tensor> wrapper_CPU___foreach_sqrt(at::TensorList self) {
20538 // No device check
20539 // DeviceGuard omitted
20540 return at::native::foreach_tensor_sqrt_slow(self);
20541}
20542} // anonymous namespace
20543namespace {
20544void wrapper_CPU___foreach_sqrt_(at::TensorList self) {
20545 // No device check
20546 // DeviceGuard omitted
20547 return at::native::foreach_tensor_sqrt_slow_(self);
20548}
20549} // anonymous namespace
20550namespace {
20551::std::vector<at::Tensor> wrapper_CPU___foreach_abs(at::TensorList self) {
20552 // No device check
20553 // DeviceGuard omitted
20554 return at::native::foreach_tensor_abs_slow(self);
20555}
20556} // anonymous namespace
20557namespace {
20558void wrapper_CPU___foreach_abs_(at::TensorList self) {
20559 // No device check
20560 // DeviceGuard omitted
20561 return at::native::foreach_tensor_abs_slow_(self);
20562}
20563} // anonymous namespace
20564namespace {
20565::std::vector<at::Tensor> wrapper_CPU___foreach_acos(at::TensorList self) {
20566 // No device check
20567 // DeviceGuard omitted
20568 return at::native::foreach_tensor_acos_slow(self);
20569}
20570} // anonymous namespace
20571namespace {
20572void wrapper_CPU___foreach_acos_(at::TensorList self) {
20573 // No device check
20574 // DeviceGuard omitted
20575 return at::native::foreach_tensor_acos_slow_(self);
20576}
20577} // anonymous namespace
20578namespace {
20579::std::vector<at::Tensor> wrapper_CPU___foreach_asin(at::TensorList self) {
20580 // No device check
20581 // DeviceGuard omitted
20582 return at::native::foreach_tensor_asin_slow(self);
20583}
20584} // anonymous namespace
20585namespace {
20586void wrapper_CPU___foreach_asin_(at::TensorList self) {
20587 // No device check
20588 // DeviceGuard omitted
20589 return at::native::foreach_tensor_asin_slow_(self);
20590}
20591} // anonymous namespace
20592namespace {
20593::std::vector<at::Tensor> wrapper_CPU___foreach_atan(at::TensorList self) {
20594 // No device check
20595 // DeviceGuard omitted
20596 return at::native::foreach_tensor_atan_slow(self);
20597}
20598} // anonymous namespace
20599namespace {
20600void wrapper_CPU___foreach_atan_(at::TensorList self) {
20601 // No device check
20602 // DeviceGuard omitted
20603 return at::native::foreach_tensor_atan_slow_(self);
20604}
20605} // anonymous namespace
20606namespace {
20607::std::vector<at::Tensor> wrapper_CPU___foreach_ceil(at::TensorList self) {
20608 // No device check
20609 // DeviceGuard omitted
20610 return at::native::foreach_tensor_ceil_slow(self);
20611}
20612} // anonymous namespace
20613namespace {
20614void wrapper_CPU___foreach_ceil_(at::TensorList self) {
20615 // No device check
20616 // DeviceGuard omitted
20617 return at::native::foreach_tensor_ceil_slow_(self);
20618}
20619} // anonymous namespace
20620namespace {
20621::std::vector<at::Tensor> wrapper_CPU___foreach_cos(at::TensorList self) {
20622 // No device check
20623 // DeviceGuard omitted
20624 return at::native::foreach_tensor_cos_slow(self);
20625}
20626} // anonymous namespace
20627namespace {
20628void wrapper_CPU___foreach_cos_(at::TensorList self) {
20629 // No device check
20630 // DeviceGuard omitted
20631 return at::native::foreach_tensor_cos_slow_(self);
20632}
20633} // anonymous namespace
20634namespace {
20635::std::vector<at::Tensor> wrapper_CPU___foreach_cosh(at::TensorList self) {
20636 // No device check
20637 // DeviceGuard omitted
20638 return at::native::foreach_tensor_cosh_slow(self);
20639}
20640} // anonymous namespace
20641namespace {
20642void wrapper_CPU___foreach_cosh_(at::TensorList self) {
20643 // No device check
20644 // DeviceGuard omitted
20645 return at::native::foreach_tensor_cosh_slow_(self);
20646}
20647} // anonymous namespace
20648namespace {
20649::std::vector<at::Tensor> wrapper_CPU___foreach_erf(at::TensorList self) {
20650 // No device check
20651 // DeviceGuard omitted
20652 return at::native::foreach_tensor_erf_slow(self);
20653}
20654} // anonymous namespace
20655namespace {
20656void wrapper_CPU___foreach_erf_(at::TensorList self) {
20657 // No device check
20658 // DeviceGuard omitted
20659 return at::native::foreach_tensor_erf_slow_(self);
20660}
20661} // anonymous namespace
20662namespace {
20663::std::vector<at::Tensor> wrapper_CPU___foreach_erfc(at::TensorList self) {
20664 // No device check
20665 // DeviceGuard omitted
20666 return at::native::foreach_tensor_erfc_slow(self);
20667}
20668} // anonymous namespace
20669namespace {
20670void wrapper_CPU___foreach_erfc_(at::TensorList self) {
20671 // No device check
20672 // DeviceGuard omitted
20673 return at::native::foreach_tensor_erfc_slow_(self);
20674}
20675} // anonymous namespace
20676namespace {
20677::std::vector<at::Tensor> wrapper_CPU___foreach_expm1(at::TensorList self) {
20678 // No device check
20679 // DeviceGuard omitted
20680 return at::native::foreach_tensor_expm1_slow(self);
20681}
20682} // anonymous namespace
20683namespace {
20684void wrapper_CPU___foreach_expm1_(at::TensorList self) {
20685 // No device check
20686 // DeviceGuard omitted
20687 return at::native::foreach_tensor_expm1_slow_(self);
20688}
20689} // anonymous namespace
20690namespace {
20691::std::vector<at::Tensor> wrapper_CPU___foreach_floor(at::TensorList self) {
20692 // No device check
20693 // DeviceGuard omitted
20694 return at::native::foreach_tensor_floor_slow(self);
20695}
20696} // anonymous namespace
20697namespace {
20698void wrapper_CPU___foreach_floor_(at::TensorList self) {
20699 // No device check
20700 // DeviceGuard omitted
20701 return at::native::foreach_tensor_floor_slow_(self);
20702}
20703} // anonymous namespace
20704namespace {
20705::std::vector<at::Tensor> wrapper_CPU___foreach_log(at::TensorList self) {
20706 // No device check
20707 // DeviceGuard omitted
20708 return at::native::foreach_tensor_log_slow(self);
20709}
20710} // anonymous namespace
20711namespace {
20712void wrapper_CPU___foreach_log_(at::TensorList self) {
20713 // No device check
20714 // DeviceGuard omitted
20715 return at::native::foreach_tensor_log_slow_(self);
20716}
20717} // anonymous namespace
20718namespace {
20719::std::vector<at::Tensor> wrapper_CPU___foreach_log10(at::TensorList self) {
20720 // No device check
20721 // DeviceGuard omitted
20722 return at::native::foreach_tensor_log10_slow(self);
20723}
20724} // anonymous namespace
20725namespace {
20726void wrapper_CPU___foreach_log10_(at::TensorList self) {
20727 // No device check
20728 // DeviceGuard omitted
20729 return at::native::foreach_tensor_log10_slow_(self);
20730}
20731} // anonymous namespace
20732namespace {
20733::std::vector<at::Tensor> wrapper_CPU___foreach_log1p(at::TensorList self) {
20734 // No device check
20735 // DeviceGuard omitted
20736 return at::native::foreach_tensor_log1p_slow(self);
20737}
20738} // anonymous namespace
20739namespace {
20740void wrapper_CPU___foreach_log1p_(at::TensorList self) {
20741 // No device check
20742 // DeviceGuard omitted
20743 return at::native::foreach_tensor_log1p_slow_(self);
20744}
20745} // anonymous namespace
20746namespace {
20747::std::vector<at::Tensor> wrapper_CPU___foreach_log2(at::TensorList self) {
20748 // No device check
20749 // DeviceGuard omitted
20750 return at::native::foreach_tensor_log2_slow(self);
20751}
20752} // anonymous namespace
20753namespace {
20754void wrapper_CPU___foreach_log2_(at::TensorList self) {
20755 // No device check
20756 // DeviceGuard omitted
20757 return at::native::foreach_tensor_log2_slow_(self);
20758}
20759} // anonymous namespace
20760namespace {
20761::std::vector<at::Tensor> wrapper_CPU___foreach_neg(at::TensorList self) {
20762 // No device check
20763 // DeviceGuard omitted
20764 return at::native::foreach_tensor_neg_slow(self);
20765}
20766} // anonymous namespace
20767namespace {
20768void wrapper_CPU___foreach_neg_(at::TensorList self) {
20769 // No device check
20770 // DeviceGuard omitted
20771 return at::native::foreach_tensor_neg_slow_(self);
20772}
20773} // anonymous namespace
20774namespace {
20775::std::vector<at::Tensor> wrapper_CPU___foreach_tan(at::TensorList self) {
20776 // No device check
20777 // DeviceGuard omitted
20778 return at::native::foreach_tensor_tan_slow(self);
20779}
20780} // anonymous namespace
20781namespace {
20782void wrapper_CPU___foreach_tan_(at::TensorList self) {
20783 // No device check
20784 // DeviceGuard omitted
20785 return at::native::foreach_tensor_tan_slow_(self);
20786}
20787} // anonymous namespace
20788namespace {
20789::std::vector<at::Tensor> wrapper_CPU___foreach_tanh(at::TensorList self) {
20790 // No device check
20791 // DeviceGuard omitted
20792 return at::native::foreach_tensor_tanh_slow(self);
20793}
20794} // anonymous namespace
20795namespace {
20796void wrapper_CPU___foreach_tanh_(at::TensorList self) {
20797 // No device check
20798 // DeviceGuard omitted
20799 return at::native::foreach_tensor_tanh_slow_(self);
20800}
20801} // anonymous namespace
20802namespace {
20803::std::vector<at::Tensor> wrapper_CPU___foreach_sin(at::TensorList self) {
20804 // No device check
20805 // DeviceGuard omitted
20806 return at::native::foreach_tensor_sin_slow(self);
20807}
20808} // anonymous namespace
20809namespace {
20810void wrapper_CPU___foreach_sin_(at::TensorList self) {
20811 // No device check
20812 // DeviceGuard omitted
20813 return at::native::foreach_tensor_sin_slow_(self);
20814}
20815} // anonymous namespace
20816namespace {
20817::std::vector<at::Tensor> wrapper_CPU___foreach_sinh(at::TensorList self) {
20818 // No device check
20819 // DeviceGuard omitted
20820 return at::native::foreach_tensor_sinh_slow(self);
20821}
20822} // anonymous namespace
20823namespace {
20824void wrapper_CPU___foreach_sinh_(at::TensorList self) {
20825 // No device check
20826 // DeviceGuard omitted
20827 return at::native::foreach_tensor_sinh_slow_(self);
20828}
20829} // anonymous namespace
20830namespace {
20831::std::vector<at::Tensor> wrapper_CPU___foreach_round(at::TensorList self) {
20832 // No device check
20833 // DeviceGuard omitted
20834 return at::native::foreach_tensor_round_slow(self);
20835}
20836} // anonymous namespace
20837namespace {
20838void wrapper_CPU___foreach_round_(at::TensorList self) {
20839 // No device check
20840 // DeviceGuard omitted
20841 return at::native::foreach_tensor_round_slow_(self);
20842}
20843} // anonymous namespace
20844namespace {
20845::std::vector<at::Tensor> wrapper_CPU___foreach_lgamma(at::TensorList self) {
20846 // No device check
20847 // DeviceGuard omitted
20848 return at::native::foreach_tensor_lgamma_slow(self);
20849}
20850} // anonymous namespace
20851namespace {
20852void wrapper_CPU___foreach_lgamma_(at::TensorList self) {
20853 // No device check
20854 // DeviceGuard omitted
20855 return at::native::foreach_tensor_lgamma_slow_(self);
20856}
20857} // anonymous namespace
20858namespace {
20859::std::vector<at::Tensor> wrapper_CPU___foreach_frac(at::TensorList self) {
20860 // No device check
20861 // DeviceGuard omitted
20862 return at::native::foreach_tensor_frac_slow(self);
20863}
20864} // anonymous namespace
20865namespace {
20866void wrapper_CPU___foreach_frac_(at::TensorList self) {
20867 // No device check
20868 // DeviceGuard omitted
20869 return at::native::foreach_tensor_frac_slow_(self);
20870}
20871} // anonymous namespace
20872namespace {
20873::std::vector<at::Tensor> wrapper_CPU___foreach_reciprocal(at::TensorList self) {
20874 // No device check
20875 // DeviceGuard omitted
20876 return at::native::foreach_tensor_reciprocal_slow(self);
20877}
20878} // anonymous namespace
20879namespace {
20880void wrapper_CPU___foreach_reciprocal_(at::TensorList self) {
20881 // No device check
20882 // DeviceGuard omitted
20883 return at::native::foreach_tensor_reciprocal_slow_(self);
20884}
20885} // anonymous namespace
20886namespace {
20887::std::vector<at::Tensor> wrapper_CPU___foreach_sigmoid(at::TensorList self) {
20888 // No device check
20889 // DeviceGuard omitted
20890 return at::native::foreach_tensor_sigmoid_slow(self);
20891}
20892} // anonymous namespace
20893namespace {
20894void wrapper_CPU___foreach_sigmoid_(at::TensorList self) {
20895 // No device check
20896 // DeviceGuard omitted
20897 return at::native::foreach_tensor_sigmoid_slow_(self);
20898}
20899} // anonymous namespace
20900namespace {
20901::std::vector<at::Tensor> wrapper_CPU___foreach_trunc(at::TensorList self) {
20902 // No device check
20903 // DeviceGuard omitted
20904 return at::native::foreach_tensor_trunc_slow(self);
20905}
20906} // anonymous namespace
20907namespace {
20908void wrapper_CPU___foreach_trunc_(at::TensorList self) {
20909 // No device check
20910 // DeviceGuard omitted
20911 return at::native::foreach_tensor_trunc_slow_(self);
20912}
20913} // anonymous namespace
20914namespace {
20915::std::vector<at::Tensor> wrapper_CPU_Scalar__foreach_addcdiv(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
20916 // No device check
20917 // DeviceGuard omitted
20918 return at::native::foreach_tensor_addcdiv_scalar_slow(self, tensor1, tensor2, value);
20919}
20920} // anonymous namespace
20921namespace {
20922void wrapper_CPU_Scalar__foreach_addcdiv_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
20923 // No device check
20924 // DeviceGuard omitted
20925 return at::native::foreach_tensor_addcdiv_scalar_slow_(self, tensor1, tensor2, value);
20926}
20927} // anonymous namespace
20928namespace {
20929::std::vector<at::Tensor> wrapper_CPU_Scalar__foreach_addcmul(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
20930 // No device check
20931 // DeviceGuard omitted
20932 return at::native::foreach_tensor_addcmul_scalar_slow(self, tensor1, tensor2, value);
20933}
20934} // anonymous namespace
20935namespace {
20936void wrapper_CPU_Scalar__foreach_addcmul_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
20937 // No device check
20938 // DeviceGuard omitted
20939 return at::native::foreach_tensor_addcmul_scalar_slow_(self, tensor1, tensor2, value);
20940}
20941} // anonymous namespace
20942namespace {
20943::std::vector<at::Tensor> wrapper_CPU_ScalarList__foreach_addcdiv(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
20944 // No device check
20945 // DeviceGuard omitted
20946 return at::native::foreach_tensor_addcdiv_scalarlist_slow(self, tensor1, tensor2, scalars);
20947}
20948} // anonymous namespace
20949namespace {
20950void wrapper_CPU_ScalarList__foreach_addcdiv_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
20951 // No device check
20952 // DeviceGuard omitted
20953 return at::native::foreach_tensor_addcdiv_scalarlist_slow_(self, tensor1, tensor2, scalars);
20954}
20955} // anonymous namespace
20956namespace {
20957::std::vector<at::Tensor> wrapper_CPU_Tensor__foreach_addcdiv(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
20958 // No device check
20959 // DeviceGuard omitted
20960 return at::native::foreach_tensor_addcdiv_tensor_slow(self, tensor1, tensor2, scalars);
20961}
20962} // anonymous namespace
20963namespace {
20964void wrapper_CPU_Tensor__foreach_addcdiv_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
20965 // No device check
20966 // DeviceGuard omitted
20967 return at::native::foreach_tensor_addcdiv_tensor_slow_(self, tensor1, tensor2, scalars);
20968}
20969} // anonymous namespace
20970namespace {
20971::std::vector<at::Tensor> wrapper_CPU_ScalarList__foreach_addcmul(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
20972 // No device check
20973 // DeviceGuard omitted
20974 return at::native::foreach_tensor_addcmul_scalarlist_slow(self, tensor1, tensor2, scalars);
20975}
20976} // anonymous namespace
20977namespace {
20978void wrapper_CPU_ScalarList__foreach_addcmul_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
20979 // No device check
20980 // DeviceGuard omitted
20981 return at::native::foreach_tensor_addcmul_scalarlist_slow_(self, tensor1, tensor2, scalars);
20982}
20983} // anonymous namespace
20984namespace {
20985::std::vector<at::Tensor> wrapper_CPU_Tensor__foreach_addcmul(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
20986 // No device check
20987 // DeviceGuard omitted
20988 return at::native::foreach_tensor_addcmul_tensor_slow(self, tensor1, tensor2, scalars);
20989}
20990} // anonymous namespace
20991namespace {
20992void wrapper_CPU_Tensor__foreach_addcmul_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
20993 // No device check
20994 // DeviceGuard omitted
20995 return at::native::foreach_tensor_addcmul_tensor_slow_(self, tensor1, tensor2, scalars);
20996}
20997} // anonymous namespace
20998namespace {
20999::std::vector<at::Tensor> wrapper_CPU_Scalar__foreach_norm(at::TensorList self, const at::Scalar & ord) {
21000 // No device check
21001 // DeviceGuard omitted
21002 return at::native::foreach_tensor_norm_slow(self, ord);
21003}
21004} // anonymous namespace
21005namespace {
21006::std::vector<at::Tensor> wrapper_CPU_List__foreach_lerp(at::TensorList self, at::TensorList tensors1, at::TensorList weights) {
21007 // No device check
21008 // DeviceGuard omitted
21009 return at::native::foreach_tensor_ternary_lerp_slow(self, tensors1, weights);
21010}
21011} // anonymous namespace
21012namespace {
21013void wrapper_CPU_List__foreach_lerp_(at::TensorList self, at::TensorList tensors1, at::TensorList weights) {
21014 // No device check
21015 // DeviceGuard omitted
21016 return at::native::foreach_tensor_ternary_lerp_slow_(self, tensors1, weights);
21017}
21018} // anonymous namespace
21019namespace {
21020::std::vector<at::Tensor> wrapper_CPU_Scalar__foreach_lerp(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {
21021 // No device check
21022 // DeviceGuard omitted
21023 return at::native::foreach_tensor_lerp_list_kernel_slow(self, tensors1, weight);
21024}
21025} // anonymous namespace
21026namespace {
21027void wrapper_CPU_Scalar__foreach_lerp_(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {
21028 // No device check
21029 // DeviceGuard omitted
21030 return at::native::foreach_tensor_lerp_list_kernel_slow_(self, tensors1, weight);
21031}
21032} // anonymous namespace
21033namespace {
21034at::Tensor wrapper_CPU_Tensor_bucketize(const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right) {
21035 // No device check
21036 // DeviceGuard omitted
21037 return at::native::bucketize_cpu(self, boundaries, out_int32, right);
21038}
21039} // anonymous namespace
21040namespace {
21041at::Tensor & wrapper_CPU_Tensor_out_bucketize_out(const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right, at::Tensor & out) {
21042 // No device check
21043 // DeviceGuard omitted
21044 return at::native::bucketize_out_cpu(self, boundaries, out_int32, right, out);
21045}
21046} // anonymous namespace
21047namespace {
21048at::Tensor wrapper_CPU_Scalar_bucketize(const at::Scalar & self, const at::Tensor & boundaries, bool out_int32, bool right) {
21049 // No device check
21050 // DeviceGuard omitted
21051 return at::native::bucketize_cpu(self, boundaries, out_int32, right);
21052}
21053} // anonymous namespace
21054namespace {
21055at::Tensor wrapper_CPU_Tensor_searchsorted(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter) {
21056 // No device check
21057 // DeviceGuard omitted
21058 return at::native::searchsorted_cpu(sorted_sequence, self, out_int32, right, side, sorter);
21059}
21060} // anonymous namespace
21061namespace {
21062at::Tensor & wrapper_CPU_Tensor_out_searchsorted_out(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter, at::Tensor & out) {
21063 // No device check
21064 // DeviceGuard omitted
21065 return at::native::searchsorted_out_cpu(sorted_sequence, self, out_int32, right, side, sorter, out);
21066}
21067} // anonymous namespace
21068namespace {
21069at::Tensor wrapper_CPU_Scalar_searchsorted(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter) {
21070 // No device check
21071 // DeviceGuard omitted
21072 return at::native::searchsorted_cpu(sorted_sequence, self, out_int32, right, side, sorter);
21073}
21074} // anonymous namespace
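// Back to structured-kernel wrappers: the *_functional variants allocate a fresh output via
// create_out(), the *_out variants resize and validate the caller-provided tensor (staging the
// result in a proxy tensor when needed and copying it back after impl() runs), and
// maybe_get_output() returns whichever of the two currently holds the output.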
21075struct structured__convert_indices_from_coo_to_csr_structured_cpu_functional final : public at::native::structured__convert_indices_from_coo_to_csr_structured_cpu {
21076 void set_output_strided(
21077 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21078 TensorOptions options, DimnameList names
21079 ) override {
21080 outputs_[output_idx] = create_out(sizes, strides, options);
21081 if (!names.empty()) {
21082 namedinference::propagate_names(*outputs_[output_idx], names);
21083 }
21084 // super must happen after, so that downstream can use maybe_get_output
21085 // to retrieve the output
21086 }
21087 void set_output_raw_strided(
21088 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21089 TensorOptions options, DimnameList names
21090 ) override {
21091 outputs_[output_idx] = create_out(sizes, strides, options);
21092 if (!names.empty()) {
21093 namedinference::propagate_names(*outputs_[output_idx], names);
21094 }
21095 // super must happen after, so that downstream can use maybe_get_output
21096 // to retrieve the output
21097 }
21098 const Tensor& maybe_get_output(int64_t output_idx) override {
21099 return *outputs_[output_idx];
21100 }
21101 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
21102};
21103at::Tensor wrapper_CPU__convert_indices_from_coo_to_csr(const at::Tensor & self, int64_t size, bool out_int32) {
21104structured__convert_indices_from_coo_to_csr_structured_cpu_functional op;
21105op.meta(self, size, out_int32);
21106op.impl(self, size, out_int32, *op.outputs_[0]);
21107return std::move(op.outputs_[0]).take();
21108}
21109struct structured__convert_indices_from_coo_to_csr_structured_cpu_out final : public at::native::structured__convert_indices_from_coo_to_csr_structured_cpu {
21110 structured__convert_indices_from_coo_to_csr_structured_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
21111 void set_output_strided(
21112 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21113 TensorOptions options, DimnameList names
21114 ) override {
21115 const auto& out = outputs_[output_idx].get();
21116 resize_out(out, sizes, strides, options);
21117 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
21118 if (C10_UNLIKELY(maybe_proxy.has_value())) {
21119 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
21120 }
21121 if (!names.empty()) {
21122 namedinference::propagate_names(outputs_[output_idx], names);
21123 }
21124 // super must happen after, so that downstream can use maybe_get_output
21125 // to retrieve the output
21126 }
21127 void set_output_raw_strided(
21128 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21129 TensorOptions options, DimnameList names
21130 ) override {
21131 const auto& out = outputs_[output_idx].get();
21132 resize_out(out, sizes, strides, options);
21133 if (!names.empty()) {
21134 namedinference::propagate_names(outputs_[output_idx], names);
21135 }
21136 // super must happen after, so that downstream can use maybe_get_output
21137 // to retrieve the output
21138 }
21139 const Tensor& maybe_get_output(int64_t output_idx) override {
21140 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
21141 }
21142 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
21143 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
21144};
21145at::Tensor & wrapper_CPU__convert_indices_from_coo_to_csr_out_out(const at::Tensor & self, int64_t size, bool out_int32, at::Tensor & out) {
21146structured__convert_indices_from_coo_to_csr_structured_cpu_out op(out);
21147op.meta(self, size, out_int32);
21148op.impl(self, size, out_int32, op.maybe_get_output(0));
21149if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
21150return out;
21151}
21152struct structured__convert_indices_from_csr_to_coo_structured_cpu_functional final : public at::native::structured__convert_indices_from_csr_to_coo_structured_cpu {
21153 void set_output_strided(
21154 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21155 TensorOptions options, DimnameList names
21156 ) override {
21157 outputs_[output_idx] = create_out(sizes, strides, options);
21158 if (!names.empty()) {
21159 namedinference::propagate_names(*outputs_[output_idx], names);
21160 }
21161 // super must happen after, so that downstream can use maybe_get_output
21162 // to retrieve the output
21163 }
21164 void set_output_raw_strided(
21165 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21166 TensorOptions options, DimnameList names
21167 ) override {
21168 outputs_[output_idx] = create_out(sizes, strides, options);
21169 if (!names.empty()) {
21170 namedinference::propagate_names(*outputs_[output_idx], names);
21171 }
21172 // super must happen after, so that downstream can use maybe_get_output
21173 // to retrieve the output
21174 }
21175 const Tensor& maybe_get_output(int64_t output_idx) override {
21176 return *outputs_[output_idx];
21177 }
21178 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
21179};
21180at::Tensor wrapper_CPU__convert_indices_from_csr_to_coo(const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose) {
21181structured__convert_indices_from_csr_to_coo_structured_cpu_functional op;
21182op.meta(crow_indices, col_indices, out_int32, transpose);
21183op.impl(crow_indices, col_indices, out_int32, transpose, *op.outputs_[0]);
21184return std::move(op.outputs_[0]).take();
21185}
21186struct structured__convert_indices_from_csr_to_coo_structured_cpu_out final : public at::native::structured__convert_indices_from_csr_to_coo_structured_cpu {
21187 structured__convert_indices_from_csr_to_coo_structured_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
21188 void set_output_strided(
21189 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21190 TensorOptions options, DimnameList names
21191 ) override {
21192 const auto& out = outputs_[output_idx].get();
21193 resize_out(out, sizes, strides, options);
21194 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
21195 if (C10_UNLIKELY(maybe_proxy.has_value())) {
21196 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
21197 }
21198 if (!names.empty()) {
21199 namedinference::propagate_names(outputs_[output_idx], names);
21200 }
21201 // super must happen after, so that downstream can use maybe_get_output
21202 // to retrieve the output
21203 }
21204 void set_output_raw_strided(
21205 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21206 TensorOptions options, DimnameList names
21207 ) override {
21208 const auto& out = outputs_[output_idx].get();
21209 resize_out(out, sizes, strides, options);
21210 if (!names.empty()) {
21211 namedinference::propagate_names(outputs_[output_idx], names);
21212 }
21213 // super must happen after, so that downstream can use maybe_get_output
21214 // to retrieve the output
21215 }
21216 const Tensor& maybe_get_output(int64_t output_idx) override {
21217 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
21218 }
21219 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
21220 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
21221};
21222at::Tensor & wrapper_CPU__convert_indices_from_csr_to_coo_out_out(const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose, at::Tensor & out) {
21223structured__convert_indices_from_csr_to_coo_structured_cpu_out op(out);
21224op.meta(crow_indices, col_indices, out_int32, transpose);
21225op.impl(crow_indices, col_indices, out_int32, transpose, op.maybe_get_output(0));
21226if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
21227return out;
21228}
21229struct structured_mse_loss_out_functional final : public at::native::structured_mse_loss_out {
21230 void set_output_strided(
21231 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21232 TensorOptions options, DimnameList names
21233 ) override {
21234 outputs_[output_idx] = create_out(sizes, strides, options);
21235 if (!names.empty()) {
21236 namedinference::propagate_names(*outputs_[output_idx], names);
21237 }
21238 // super must happen after, so that downstream can use maybe_get_output
21239 // to retrieve the output
21240 at::native::structured_mse_loss_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
21241 }
21242 void set_output_raw_strided(
21243 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21244 TensorOptions options, DimnameList names
21245 ) override {
21246 outputs_[output_idx] = create_out(sizes, strides, options);
21247 if (!names.empty()) {
21248 namedinference::propagate_names(*outputs_[output_idx], names);
21249 }
21250 // super must happen after, so that downstream can use maybe_get_output
21251 // to retrieve the output
21252 at::native::structured_mse_loss_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
21253 }
21254 const Tensor& maybe_get_output(int64_t output_idx) override {
21255 return *outputs_[output_idx];
21256 }
21257 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
21258};
21259at::Tensor wrapper_CPU_mse_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
21260structured_mse_loss_out_functional op;
21261op.meta(self, target, reduction);
21262op.impl(self, target, reduction, *op.outputs_[0]);
21263return std::move(op.outputs_[0]).take();
21264}
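// Illustrative call path (not part of the generated registration): on CPU tensors,
//   at::Tensor a = at::randn({8});
//   at::Tensor b = at::randn({8});
//   at::Tensor loss = at::mse_loss(a, b);   // default reduction is Mean
// is routed by the dispatcher to wrapper_CPU_mse_loss above.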
21265struct structured_mse_loss_out_out final : public at::native::structured_mse_loss_out {
21266 structured_mse_loss_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
21267 void set_output_strided(
21268 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21269 TensorOptions options, DimnameList names
21270 ) override {
21271 const auto& out = outputs_[output_idx].get();
21272 resize_out(out, sizes, strides, options);
21273 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
21274 if (C10_UNLIKELY(maybe_proxy.has_value())) {
21275 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
21276 }
21277 if (!names.empty()) {
21278 namedinference::propagate_names(outputs_[output_idx], names);
21279 }
21280 // super must happen after, so that downstream can use maybe_get_output
21281 // to retrieve the output
21282 at::native::structured_mse_loss_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
21283 }
21284 void set_output_raw_strided(
21285 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21286 TensorOptions options, DimnameList names
21287 ) override {
21288 const auto& out = outputs_[output_idx].get();
21289 resize_out(out, sizes, strides, options);
21290 if (!names.empty()) {
21291 namedinference::propagate_names(outputs_[output_idx], names);
21292 }
21293 // super must happen after, so that downstream can use maybe_get_output
21294 // to retrieve the output
21295 at::native::structured_mse_loss_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
21296 }
21297 const Tensor& maybe_get_output(int64_t output_idx) override {
21298 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
21299 }
21300 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
21301 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
21302};
21303at::Tensor & wrapper_CPU_mse_loss_out_out(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {
21304structured_mse_loss_out_out op(out);
21305op.meta(self, target, reduction);
21306op.impl(self, target, reduction, op.maybe_get_output(0));
21307if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
21308return out;
21309}
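// Illustrative use of the out= overload registered above (assumes the usual ATen
// Functions.h API, with the out tensor passed first):
//   at::Tensor out = at::empty({0});
//   at::mse_loss_out(out, a, b, at::Reduction::Mean);
// which lands in wrapper_CPU_mse_loss_out_out and resizes `out` as needed.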
21310namespace {
21311at::Tensor wrapper_CPU__mse_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
21312 // No device check
21313 // DeviceGuard omitted
21314 return at::native::mse_loss_backward(grad_output, self, target, reduction);
21315}
21316} // anonymous namespace
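// NOTE: this and the following anonymous-namespace wrappers cover unstructured kernels:
// they forward straight to the at::native implementation, and the generated comments
// record that no device check or DeviceGuard is emitted for this CPU registration.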
21317namespace {
21318at::Tensor & wrapper_CPU_grad_input_mse_loss_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input) {
21319 // No device check
21320 // DeviceGuard omitted
21321 return at::native::mse_loss_backward_out(grad_output, self, target, reduction, grad_input);
21322}
21323} // anonymous namespace
21324namespace {
21325at::Tensor wrapper_CPU__multi_margin_loss(const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction) {
21326 // No device check
21327 // DeviceGuard omitted
21328 return at::native::multi_margin_loss_cpu(self, target, p, margin, weight, reduction);
21329}
21330} // anonymous namespace
21331namespace {
21332at::Tensor & wrapper_CPU_out_multi_margin_loss_out(const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out) {
21333 // No device check
21334 // DeviceGuard omitted
21335 return at::native::multi_margin_loss_cpu_out(self, target, p, margin, weight, reduction, out);
21336}
21337} // anonymous namespace
21338namespace {
21339at::Tensor wrapper_CPU__multi_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction) {
21340 // No device check
21341 // DeviceGuard omitted
21342 return at::native::multi_margin_loss_cpu_backward(grad_output, self, target, p, margin, weight, reduction);
21343}
21344} // anonymous namespace
21345namespace {
21346at::Tensor & wrapper_CPU_grad_input_multi_margin_loss_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input) {
21347 // No device check
21348 // DeviceGuard omitted
21349 return at::native::multi_margin_loss_cpu_backward_out(grad_output, self, target, p, margin, weight, reduction, grad_input);
21350}
21351} // anonymous namespace
21352namespace {
21353::std::tuple<at::Tensor,at::Tensor> wrapper_CPU__multilabel_margin_loss_forward(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
21354 // No device check
21355 // DeviceGuard omitted
21356 return at::native::multilabel_margin_loss_forward_cpu(self, target, reduction);
21357}
21358} // anonymous namespace
21359namespace {
21360::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_output_multilabel_margin_loss_forward_out(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & output, at::Tensor & is_target) {
21361 // No device check
21362 // DeviceGuard omitted
21363 return at::native::multilabel_margin_loss_forward_out_cpu(self, target, reduction, output, is_target);
21364}
21365} // anonymous namespace
21366namespace {
21367at::Tensor wrapper_CPU__multilabel_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) {
21368 // No device check
21369 // DeviceGuard omitted
21370 return at::native::multilabel_margin_loss_backward_cpu(grad_output, self, target, reduction, is_target);
21371}
21372} // anonymous namespace
21373namespace {
21374at::Tensor & wrapper_CPU_grad_input_multilabel_margin_loss_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target, at::Tensor & grad_input) {
21375 // No device check
21376 // DeviceGuard omitted
21377 return at::native::multilabel_margin_loss_backward_cpu_out(grad_output, self, target, reduction, is_target, grad_input);
21378}
21379} // anonymous namespace
21380struct structured_nll_loss_forward_out_cpu_functional final : public at::native::structured_nll_loss_forward_out_cpu {
21381 void set_output_strided(
21382 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21383 TensorOptions options, DimnameList names
21384 ) override {
21385 outputs_[output_idx] = create_out(sizes, strides, options);
21386 if (!names.empty()) {
21387 namedinference::propagate_names(*outputs_[output_idx], names);
21388 }
21389 // super must happen after, so that downstream can use maybe_get_output
21390 // to retrieve the output
21391 }
21392 void set_output_raw_strided(
21393 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21394 TensorOptions options, DimnameList names
21395 ) override {
21396 outputs_[output_idx] = create_out(sizes, strides, options);
21397 if (!names.empty()) {
21398 namedinference::propagate_names(*outputs_[output_idx], names);
21399 }
21400 // super must happen after, so that downstream can use maybe_get_output
21401 // to retrieve the output
21402 }
21403 const Tensor& maybe_get_output(int64_t output_idx) override {
21404 return *outputs_[output_idx];
21405 }
21406 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
21407};
21408::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_nll_loss_forward(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
21409structured_nll_loss_forward_out_cpu_functional op;
21410op.meta(self, target, ((weight.has_value() && (*weight).defined()) ? at::OptionalTensorRef(*weight) : at::OptionalTensorRef()), reduction, ignore_index);
21411op.impl(self, target, ((weight.has_value() && (*weight).defined()) ? at::OptionalTensorRef(*weight) : at::OptionalTensorRef()), reduction, ignore_index, *op.outputs_[0], *op.outputs_[1]);
21412return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
21413}
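// NOTE: the optional `weight` is only wrapped into an OptionalTensorRef when it is both
// present and defined, so an undefined Tensor behaves the same as a missing weight; the
// two results (output, total_weight) come back through a two-element outputs_ array and
// are returned as a tuple.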
21414struct structured_nll_loss_forward_out_cpu_out final : public at::native::structured_nll_loss_forward_out_cpu {
21415 structured_nll_loss_forward_out_cpu_out(Tensor& out0, Tensor& out1) : outputs_{ std::ref(out0), std::ref(out1) } {}
21416 void set_output_strided(
21417 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21418 TensorOptions options, DimnameList names
21419 ) override {
21420 const auto& out = outputs_[output_idx].get();
21421 resize_out(out, sizes, strides, options);
21422 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
21423 if (C10_UNLIKELY(maybe_proxy.has_value())) {
21424 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
21425 }
21426 if (!names.empty()) {
21427 namedinference::propagate_names(outputs_[output_idx], names);
21428 }
21429 // super must happen after, so that downstream can use maybe_get_output
21430 // to retrieve the output
21431 }
21432 void set_output_raw_strided(
21433 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21434 TensorOptions options, DimnameList names
21435 ) override {
21436 const auto& out = outputs_[output_idx].get();
21437 resize_out(out, sizes, strides, options);
21438 if (!names.empty()) {
21439 namedinference::propagate_names(outputs_[output_idx], names);
21440 }
21441 // super must happen after, so that downstream can use maybe_get_output
21442 // to retrieve the output
21443 }
21444 const Tensor& maybe_get_output(int64_t output_idx) override {
21445 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
21446 }
21447 std::array<std::reference_wrapper<Tensor>, 2> outputs_;
21448 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 2> proxy_outputs_;
21449};
21450::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_nll_loss_forward_out_output(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & output, at::Tensor & total_weight) {
21451structured_nll_loss_forward_out_cpu_out op(output, total_weight);
21452op.meta(self, target, ((weight.has_value() && (*weight).defined()) ? at::OptionalTensorRef(*weight) : at::OptionalTensorRef()), reduction, ignore_index);
21453op.impl(self, target, ((weight.has_value() && (*weight).defined()) ? at::OptionalTensorRef(*weight) : at::OptionalTensorRef()), reduction, ignore_index, op.maybe_get_output(0), op.maybe_get_output(1));
21454if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
21455if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(**op.proxy_outputs_[1]);
21456return std::forward_as_tuple(output, total_weight);
21457}
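// NOTE: with two user-supplied outputs, each slot may independently receive a proxy, and
// each proxy (if any) is copied back into the corresponding output after impl().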
21458struct structured_nll_loss_backward_out_cpu_functional final : public at::native::structured_nll_loss_backward_out_cpu {
21459 void set_output_strided(
21460 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21461 TensorOptions options, DimnameList names
21462 ) override {
21463 outputs_[output_idx] = create_out(sizes, strides, options);
21464 if (!names.empty()) {
21465 namedinference::propagate_names(*outputs_[output_idx], names);
21466 }
21467 // super must happen after, so that downstream can use maybe_get_output
21468 // to retrieve the output
21469 }
21470 void set_output_raw_strided(
21471 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21472 TensorOptions options, DimnameList names
21473 ) override {
21474 outputs_[output_idx] = create_out(sizes, strides, options);
21475 if (!names.empty()) {
21476 namedinference::propagate_names(*outputs_[output_idx], names);
21477 }
21478 // super must happen after, so that downstream can use maybe_get_output
21479 // to retrieve the output
21480 }
21481 const Tensor& maybe_get_output(int64_t output_idx) override {
21482 return *outputs_[output_idx];
21483 }
21484 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
21485};
21486at::Tensor wrapper_CPU_nll_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) {
21487structured_nll_loss_backward_out_cpu_functional op;
21488op.meta(grad_output, self, target, ((weight.has_value() && (*weight).defined()) ? at::OptionalTensorRef(*weight) : at::OptionalTensorRef()), reduction, ignore_index, total_weight);
21489op.impl(grad_output, self, target, ((weight.has_value() && (*weight).defined()) ? at::OptionalTensorRef(*weight) : at::OptionalTensorRef()), reduction, ignore_index, total_weight, *op.outputs_[0]);
21490return std::move(op.outputs_[0]).take();
21491}
21492struct structured_nll_loss_backward_out_cpu_out final : public at::native::structured_nll_loss_backward_out_cpu {
21493 structured_nll_loss_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
21494 void set_output_strided(
21495 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21496 TensorOptions options, DimnameList names
21497 ) override {
21498 const auto& out = outputs_[output_idx].get();
21499 resize_out(out, sizes, strides, options);
21500 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
21501 if (C10_UNLIKELY(maybe_proxy.has_value())) {
21502 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
21503 }
21504 if (!names.empty()) {
21505 namedinference::propagate_names(outputs_[output_idx], names);
21506 }
21507 // super must happen after, so that downstream can use maybe_get_output
21508 // to retrieve the output
21509 }
21510 void set_output_raw_strided(
21511 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21512 TensorOptions options, DimnameList names
21513 ) override {
21514 const auto& out = outputs_[output_idx].get();
21515 resize_out(out, sizes, strides, options);
21516 if (!names.empty()) {
21517 namedinference::propagate_names(outputs_[output_idx], names);
21518 }
21519 // super must happen after, so that downstream can use maybe_get_output
21520 // to retrieve the output
21521 }
21522 const Tensor& maybe_get_output(int64_t output_idx) override {
21523 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
21524 }
21525 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
21526 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
21527};
21528at::Tensor & wrapper_CPU_nll_loss_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) {
21529structured_nll_loss_backward_out_cpu_out op(grad_input);
21530op.meta(grad_output, self, target, ((weight.has_value() && (*weight).defined()) ? at::OptionalTensorRef(*weight) : at::OptionalTensorRef()), reduction, ignore_index, total_weight);
21531op.impl(grad_output, self, target, ((weight.has_value() && (*weight).defined()) ? at::OptionalTensorRef(*weight) : at::OptionalTensorRef()), reduction, ignore_index, total_weight, op.maybe_get_output(0));
21532if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
21533return grad_input;
21534}
21535namespace {
21536::std::tuple<at::Tensor,at::Tensor> wrapper_CPU__nll_loss2d_forward(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
21537 // No device check
21538 // DeviceGuard omitted
21539 return at::native::nll_loss2d_forward_cpu(self, target, weight, reduction, ignore_index.expect_int());
21540}
21541} // anonymous namespace
21542namespace {
21543::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_output_nll_loss2d_forward_out(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight) {
21544 // No device check
21545 // DeviceGuard omitted
21546 return at::native::nll_loss2d_forward_out_cpu(self, target, weight, reduction, ignore_index.expect_int(), output, total_weight);
21547}
21548} // anonymous namespace
21549namespace {
21550at::Tensor wrapper_CPU__nll_loss2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
21551 // No device check
21552 // DeviceGuard omitted
21553 return at::native::nll_loss2d_backward_cpu(grad_output, self, target, weight, reduction, ignore_index.expect_int(), total_weight);
21554}
21555} // anonymous namespace
21556namespace {
21557at::Tensor & wrapper_CPU_grad_input_nll_loss2d_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) {
21558 // No device check
21559 // DeviceGuard omitted
21560 return at::native::nll_loss2d_backward_out_cpu(grad_output, self, target, weight, reduction, ignore_index.expect_int(), total_weight, grad_input);
21561}
21562} // anonymous namespace
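// NOTE: `ignore_index` arrives as c10::SymInt in the nll_loss2d schema; expect_int()
// materializes a concrete int64_t for the CPU kernels above, which take a plain integer
// rather than a symbolic size.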
21563struct structured_smooth_l1_loss_out_functional final : public at::native::structured_smooth_l1_loss_out {
21564 void set_output_strided(
21565 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21566 TensorOptions options, DimnameList names
21567 ) override {
21568 outputs_[output_idx] = create_out(sizes, strides, options);
21569 if (!names.empty()) {
21570 namedinference::propagate_names(*outputs_[output_idx], names);
21571 }
21572 // super must happen after, so that downstream can use maybe_get_output
21573 // to retrieve the output
21574 at::native::structured_smooth_l1_loss_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
21575 }
21576 void set_output_raw_strided(
21577 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21578 TensorOptions options, DimnameList names
21579 ) override {
21580 outputs_[output_idx] = create_out(sizes, strides, options);
21581 if (!names.empty()) {
21582 namedinference::propagate_names(*outputs_[output_idx], names);
21583 }
21584 // super must happen after, so that downstream can use maybe_get_output
21585 // to retrieve the output
21586 at::native::structured_smooth_l1_loss_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
21587 }
21588 const Tensor& maybe_get_output(int64_t output_idx) override {
21589 return *outputs_[output_idx];
21590 }
21591 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
21592};
21593at::Tensor wrapper_CPU_smooth_l1_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
21594structured_smooth_l1_loss_out_functional op;
21595op.meta(self, target, reduction, beta);
21596op.impl(self, target, reduction, beta, *op.outputs_[0]);
21597return std::move(op.outputs_[0]).take();
21598}
21599struct structured_smooth_l1_loss_out_out final : public at::native::structured_smooth_l1_loss_out {
21600 structured_smooth_l1_loss_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
21601 void set_output_strided(
21602 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21603 TensorOptions options, DimnameList names
21604 ) override {
21605 const auto& out = outputs_[output_idx].get();
21606 resize_out(out, sizes, strides, options);
21607 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
21608 if (C10_UNLIKELY(maybe_proxy.has_value())) {
21609 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
21610 }
21611 if (!names.empty()) {
21612 namedinference::propagate_names(outputs_[output_idx], names);
21613 }
21614 // super must happen after, so that downstream can use maybe_get_output
21615 // to retrieve the output
21616 at::native::structured_smooth_l1_loss_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
21617 }
21618 void set_output_raw_strided(
21619 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21620 TensorOptions options, DimnameList names
21621 ) override {
21622 const auto& out = outputs_[output_idx].get();
21623 resize_out(out, sizes, strides, options);
21624 if (!names.empty()) {
21625 namedinference::propagate_names(outputs_[output_idx], names);
21626 }
21627 // super must happen after, so that downstream can use maybe_get_output
21628 // to retrieve the output
21629 at::native::structured_smooth_l1_loss_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
21630 }
21631 const Tensor& maybe_get_output(int64_t output_idx) override {
21632 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
21633 }
21634 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
21635 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
21636};
21637at::Tensor & wrapper_CPU_smooth_l1_loss_out_out(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta, at::Tensor & out) {
21638structured_smooth_l1_loss_out_out op(out);
21639op.meta(self, target, reduction, beta);
21640op.impl(self, target, reduction, beta, op.maybe_get_output(0));
21641if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
21642return out;
21643}
21644namespace {
21645at::Tensor & wrapper_CPU_grad_input_smooth_l1_loss_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta, at::Tensor & grad_input) {
21646 // No device check
21647 // DeviceGuard omitted
21648 return at::native::smooth_l1_loss_backward_out(grad_output, self, target, reduction, beta, grad_input);
21649}
21650} // anonymous namespace
21651namespace {
21652at::Tensor wrapper_CPU__huber_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) {
21653 // No device check
21654 // DeviceGuard omitted
21655 return at::native::huber_loss(self, target, reduction, delta);
21656}
21657} // anonymous namespace
21658namespace {
21659at::Tensor & wrapper_CPU_out_huber_loss_out(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out) {
21660 // No device check
21661 // DeviceGuard omitted
21662 return at::native::huber_loss_out(self, target, reduction, delta, out);
21663}
21664} // anonymous namespace
21665namespace {
21666at::Tensor & wrapper_CPU_out_huber_loss_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & grad_input) {
21667 // No device check
21668 // DeviceGuard omitted
21669 return at::native::huber_loss_backward_out(grad_output, self, target, reduction, delta, grad_input);
21670}
21671} // anonymous namespace
21672struct structured_elu_out_functional final : public at::native::structured_elu_out {
21673 void set_output_strided(
21674 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21675 TensorOptions options, DimnameList names
21676 ) override {
21677 outputs_[output_idx] = create_out(sizes, strides, options);
21678 if (!names.empty()) {
21679 namedinference::propagate_names(*outputs_[output_idx], names);
21680 }
21681 // super must happen after, so that downstream can use maybe_get_output
21682 // to retrieve the output
21683 at::native::structured_elu_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
21684 }
21685 void set_output_raw_strided(
21686 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21687 TensorOptions options, DimnameList names
21688 ) override {
21689 outputs_[output_idx] = create_out(sizes, strides, options);
21690 if (!names.empty()) {
21691 namedinference::propagate_names(*outputs_[output_idx], names);
21692 }
21693 // super must happen after, so that downstream can use maybe_get_output
21694 // to retrieve the output
21695 at::native::structured_elu_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
21696 }
21697 const Tensor& maybe_get_output(int64_t output_idx) override {
21698 return *outputs_[output_idx];
21699 }
21700 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
21701};
21702at::Tensor wrapper_CPU_elu(const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) {
21703structured_elu_out_functional op;
21704op.meta(self, alpha, scale, input_scale);
21705op.impl(self, alpha, scale, input_scale, *op.outputs_[0]);
21706return std::move(op.outputs_[0]).take();
21707}
21708struct structured_elu_out_out final : public at::native::structured_elu_out {
21709 structured_elu_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
21710 void set_output_strided(
21711 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21712 TensorOptions options, DimnameList names
21713 ) override {
21714 const auto& out = outputs_[output_idx].get();
21715 resize_out(out, sizes, strides, options);
21716 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
21717 if (C10_UNLIKELY(maybe_proxy.has_value())) {
21718 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
21719 }
21720 if (!names.empty()) {
21721 namedinference::propagate_names(outputs_[output_idx], names);
21722 }
21723 // super must happen after, so that downstream can use maybe_get_output
21724 // to retrieve the output
21725 at::native::structured_elu_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
21726 }
21727 void set_output_raw_strided(
21728 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21729 TensorOptions options, DimnameList names
21730 ) override {
21731 const auto& out = outputs_[output_idx].get();
21732 resize_out(out, sizes, strides, options);
21733 if (!names.empty()) {
21734 namedinference::propagate_names(outputs_[output_idx], names);
21735 }
21736 // super must happen after, so that downstream can use maybe_get_output
21737 // to retrieve the output
21738 at::native::structured_elu_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
21739 }
21740 const Tensor& maybe_get_output(int64_t output_idx) override {
21741 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
21742 }
21743 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
21744 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
21745};
21746at::Tensor & wrapper_CPU_elu_out_out(const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, at::Tensor & out) {
21747structured_elu_out_out op(out);
21748op.meta(self, alpha, scale, input_scale);
21749op.impl(self, alpha, scale, input_scale, op.maybe_get_output(0));
21750if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
21751return out;
21752}
21753struct structured_elu_out_inplace final : public at::native::structured_elu_out {
21754 structured_elu_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
21755 void set_output_strided(
21756 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21757 TensorOptions options, DimnameList names
21758 ) override {
21759 const auto& out = outputs_[output_idx].get();
21760 check_inplace(out, sizes, options);
21761 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
21762 if (C10_UNLIKELY(maybe_proxy.has_value())) {
21763 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
21764 }
21765 if (!names.empty()) {
21766 namedinference::propagate_names(outputs_[output_idx], names);
21767 }
21768 // super must happen after, so that downstream can use maybe_get_output
21769 // to retrieve the output
21770 at::native::structured_elu_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
21771 }
21772 void set_output_raw_strided(
21773 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21774 TensorOptions options, DimnameList names
21775 ) override {
21776 const auto& out = outputs_[output_idx].get();
21777 check_inplace(out, sizes, options);
21778 if (!names.empty()) {
21779 namedinference::propagate_names(outputs_[output_idx], names);
21780 }
21781 // super must happen after, so that downstream can use maybe_get_output
21782 // to retrieve the output
21783 at::native::structured_elu_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
21784 }
21785 const Tensor& maybe_get_output(int64_t output_idx) override {
21786 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
21787 }
21788 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
21789 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
21790};
21791at::Tensor & wrapper_CPU_elu_(at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) {
21792structured_elu_out_inplace op(self);
21793op.meta(self, alpha, scale, input_scale);
21794op.impl(self, alpha, scale, input_scale, op.outputs_[0]);
21795if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
21796return self;
21797}
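// NOTE: in-place wrappers such as wrapper_CPU_elu_ use check_inplace() instead of
// resize_out(): `self` must already have the sizes and options the meta function
// computed, the kernel writes into it (or into a proxy), and a proxy result is copied
// back into `self` before returning.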
21798struct structured_elu_backward_out_functional final : public at::native::structured_elu_backward_out {
21799 void set_output_strided(
21800 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21801 TensorOptions options, DimnameList names
21802 ) override {
21803 outputs_[output_idx] = create_out(sizes, strides, options);
21804 if (!names.empty()) {
21805 namedinference::propagate_names(*outputs_[output_idx], names);
21806 }
21807 // super must happen after, so that downstream can use maybe_get_output
21808 // to retrieve the output
21809 at::native::structured_elu_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
21810 }
21811 void set_output_raw_strided(
21812 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21813 TensorOptions options, DimnameList names
21814 ) override {
21815 outputs_[output_idx] = create_out(sizes, strides, options);
21816 if (!names.empty()) {
21817 namedinference::propagate_names(*outputs_[output_idx], names);
21818 }
21819 // super must happen after, so that downstream can use maybe_get_output
21820 // to retrieve the output
21821 at::native::structured_elu_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
21822 }
21823 const Tensor& maybe_get_output(int64_t output_idx) override {
21824 return *outputs_[output_idx];
21825 }
21826 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
21827};
21828at::Tensor wrapper_CPU_elu_backward(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result) {
21829structured_elu_backward_out_functional op;
21830op.meta(grad_output, alpha, scale, input_scale, is_result, self_or_result);
21831op.impl(grad_output, alpha, scale, input_scale, is_result, self_or_result, *op.outputs_[0]);
21832return std::move(op.outputs_[0]).take();
21833}
21834struct structured_elu_backward_out_out final : public at::native::structured_elu_backward_out {
21835 structured_elu_backward_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
21836 void set_output_strided(
21837 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21838 TensorOptions options, DimnameList names
21839 ) override {
21840 const auto& out = outputs_[output_idx].get();
21841 resize_out(out, sizes, strides, options);
21842 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
21843 if (C10_UNLIKELY(maybe_proxy.has_value())) {
21844 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
21845 }
21846 if (!names.empty()) {
21847 namedinference::propagate_names(outputs_[output_idx], names);
21848 }
21849 // super must happen after, so that downstream can use maybe_get_output
21850 // to retrieve the output
21851 at::native::structured_elu_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
21852 }
21853 void set_output_raw_strided(
21854 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21855 TensorOptions options, DimnameList names
21856 ) override {
21857 const auto& out = outputs_[output_idx].get();
21858 resize_out(out, sizes, strides, options);
21859 if (!names.empty()) {
21860 namedinference::propagate_names(outputs_[output_idx], names);
21861 }
21862 // super must happen after, so that downstream can use maybe_get_output
21863 // to retrieve the output
21864 at::native::structured_elu_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
21865 }
21866 const Tensor& maybe_get_output(int64_t output_idx) override {
21867 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
21868 }
21869 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
21870 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
21871};
21872at::Tensor & wrapper_CPU_elu_backward_out_grad_input(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result, at::Tensor & grad_input) {
21873structured_elu_backward_out_out op(grad_input);
21874op.meta(grad_output, alpha, scale, input_scale, is_result, self_or_result);
21875op.impl(grad_output, alpha, scale, input_scale, is_result, self_or_result, op.maybe_get_output(0));
21876if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
21877return grad_input;
21878}
21879struct structured_glu_out_functional final : public at::native::structured_glu_out {
21880 void set_output_strided(
21881 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21882 TensorOptions options, DimnameList names
21883 ) override {
21884 outputs_[output_idx] = create_out(sizes, strides, options);
21885 if (!names.empty()) {
21886 namedinference::propagate_names(*outputs_[output_idx], names);
21887 }
21888 // super must happen after, so that downstream can use maybe_get_output
21889 // to retrieve the output
21890 at::native::structured_glu_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
21891 }
21892 void set_output_raw_strided(
21893 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21894 TensorOptions options, DimnameList names
21895 ) override {
21896 outputs_[output_idx] = create_out(sizes, strides, options);
21897 if (!names.empty()) {
21898 namedinference::propagate_names(*outputs_[output_idx], names);
21899 }
21900 // super must happen after, so that downstream can use maybe_get_output
21901 // to retrieve the output
21902 at::native::structured_glu_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
21903 }
21904 const Tensor& maybe_get_output(int64_t output_idx) override {
21905 return *outputs_[output_idx];
21906 }
21907 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
21908};
21909at::Tensor wrapper_CPU_glu(const at::Tensor & self, int64_t dim) {
21910structured_glu_out_functional op;
21911op.meta(self, dim);
21912op.impl(self, dim, *op.outputs_[0]);
21913return std::move(op.outputs_[0]).take();
21914}
21915struct structured_glu_out_out final : public at::native::structured_glu_out {
21916 structured_glu_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
21917 void set_output_strided(
21918 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21919 TensorOptions options, DimnameList names
21920 ) override {
21921 const auto& out = outputs_[output_idx].get();
21922 resize_out(out, sizes, strides, options);
21923 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
21924 if (C10_UNLIKELY(maybe_proxy.has_value())) {
21925 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
21926 }
21927 if (!names.empty()) {
21928 namedinference::propagate_names(outputs_[output_idx], names);
21929 }
21930 // super must happen after, so that downstream can use maybe_get_output
21931 // to retrieve the output
21932 at::native::structured_glu_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
21933 }
21934 void set_output_raw_strided(
21935 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21936 TensorOptions options, DimnameList names
21937 ) override {
21938 const auto& out = outputs_[output_idx].get();
21939 resize_out(out, sizes, strides, options);
21940 if (!names.empty()) {
21941 namedinference::propagate_names(outputs_[output_idx], names);
21942 }
21943 // super must happen after, so that downstream can use maybe_get_output
21944 // to retrieve the output
21945 at::native::structured_glu_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
21946 }
21947 const Tensor& maybe_get_output(int64_t output_idx) override {
21948 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
21949 }
21950 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
21951 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
21952};
21953at::Tensor & wrapper_CPU_glu_out_out(const at::Tensor & self, int64_t dim, at::Tensor & out) {
21954structured_glu_out_out op(out);
21955op.meta(self, dim);
21956op.impl(self, dim, op.maybe_get_output(0));
21957if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
21958return out;
21959}
21960namespace {
21961at::Tensor wrapper_CPU__glu_backward(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) {
21962 // No device check
21963 // DeviceGuard omitted
21964 return at::native::glu_backward_cpu(grad_output, self, dim);
21965}
21966} // anonymous namespace
21967namespace {
21968at::Tensor & wrapper_CPU_grad_input_glu_backward_out(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, at::Tensor & grad_input) {
21969 // No device check
21970 // DeviceGuard omitted
21971 return at::native::glu_backward_cpu_out(grad_output, self, dim, grad_input);
21972}
21973} // anonymous namespace
21974namespace {
21975at::Tensor wrapper_CPU__glu_jvp(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim) {
21976 // No device check
21977 // DeviceGuard omitted
21978 return at::native::glu_jvp(glu, x, dx, dim);
21979}
21980} // anonymous namespace
21981namespace {
21982at::Tensor wrapper_CPU__glu_backward_jvp(const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim) {
21983 // No device check
21984 // DeviceGuard omitted
21985 return at::native::glu_backward_jvp(grad_x, grad_glu, x, dgrad_glu, dx, dim);
21986}
21987} // anonymous namespace
21988struct structured_hardsigmoid_out_functional final : public at::native::structured_hardsigmoid_out {
21989 void set_output_strided(
21990 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
21991 TensorOptions options, DimnameList names
21992 ) override {
21993 outputs_[output_idx] = create_out(sizes, strides, options);
21994 if (!names.empty()) {
21995 namedinference::propagate_names(*outputs_[output_idx], names);
21996 }
21997 // super must happen after, so that downstream can use maybe_get_output
21998 // to retrieve the output
21999 at::native::structured_hardsigmoid_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22000 }
22001 void set_output_raw_strided(
22002 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22003 TensorOptions options, DimnameList names
22004 ) override {
22005 outputs_[output_idx] = create_out(sizes, strides, options);
22006 if (!names.empty()) {
22007 namedinference::propagate_names(*outputs_[output_idx], names);
22008 }
22009 // super must happen after, so that downstream can use maybe_get_output
22010 // to retrieve the output
22011 at::native::structured_hardsigmoid_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22012 }
22013 const Tensor& maybe_get_output(int64_t output_idx) override {
22014 return *outputs_[output_idx];
22015 }
22016 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
22017};
22018at::Tensor wrapper_CPU_hardsigmoid(const at::Tensor & self) {
22019structured_hardsigmoid_out_functional op;
22020op.meta(self);
22021op.impl(self, *op.outputs_[0]);
22022return std::move(op.outputs_[0]).take();
22023}
22024struct structured_hardsigmoid_out_out final : public at::native::structured_hardsigmoid_out {
22025 structured_hardsigmoid_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
22026 void set_output_strided(
22027 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22028 TensorOptions options, DimnameList names
22029 ) override {
22030 const auto& out = outputs_[output_idx].get();
22031 resize_out(out, sizes, strides, options);
22032 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
22033 if (C10_UNLIKELY(maybe_proxy.has_value())) {
22034 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
22035 }
22036 if (!names.empty()) {
22037 namedinference::propagate_names(outputs_[output_idx], names);
22038 }
22039 // super must happen after, so that downstream can use maybe_get_output
22040 // to retrieve the output
22041 at::native::structured_hardsigmoid_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22042 }
22043 void set_output_raw_strided(
22044 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22045 TensorOptions options, DimnameList names
22046 ) override {
22047 const auto& out = outputs_[output_idx].get();
22048 resize_out(out, sizes, strides, options);
22049 if (!names.empty()) {
22050 namedinference::propagate_names(outputs_[output_idx], names);
22051 }
22052 // super must happen after, so that downstream can use maybe_get_output
22053 // to retrieve the output
22054 at::native::structured_hardsigmoid_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22055 }
22056 const Tensor& maybe_get_output(int64_t output_idx) override {
22057 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
22058 }
22059 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
22060 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
22061};
22062at::Tensor & wrapper_CPU_hardsigmoid_out_out(const at::Tensor & self, at::Tensor & out) {
22063structured_hardsigmoid_out_out op(out);
22064op.meta(self);
22065op.impl(self, op.maybe_get_output(0));
22066if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
22067return out;
22068}
22069struct structured_hardsigmoid_out_inplace final : public at::native::structured_hardsigmoid_out {
22070 structured_hardsigmoid_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
22071 void set_output_strided(
22072 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22073 TensorOptions options, DimnameList names
22074 ) override {
22075 const auto& out = outputs_[output_idx].get();
22076 check_inplace(out, sizes, options);
22077 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
22078 if (C10_UNLIKELY(maybe_proxy.has_value())) {
22079 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
22080 }
22081 if (!names.empty()) {
22082 namedinference::propagate_names(outputs_[output_idx], names);
22083 }
22084 // super must happen after, so that downstream can use maybe_get_output
22085 // to retrieve the output
22086 at::native::structured_hardsigmoid_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22087 }
22088 void set_output_raw_strided(
22089 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22090 TensorOptions options, DimnameList names
22091 ) override {
22092 const auto& out = outputs_[output_idx].get();
22093 check_inplace(out, sizes, options);
22094 if (!names.empty()) {
22095 namedinference::propagate_names(outputs_[output_idx], names);
22096 }
22097 // super must happen after, so that downstream can use maybe_get_output
22098 // to retrieve the output
22099 at::native::structured_hardsigmoid_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22100 }
22101 const Tensor& maybe_get_output(int64_t output_idx) override {
22102 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
22103 }
22104 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
22105 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
22106};
22107at::Tensor & wrapper_CPU_hardsigmoid_(at::Tensor & self) {
22108structured_hardsigmoid_out_inplace op(self);
22109op.meta(self);
22110op.impl(self, op.outputs_[0]);
22111if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
22112return self;
22113}
22114struct structured_hardsigmoid_backward_out_functional final : public at::native::structured_hardsigmoid_backward_out {
22115 void set_output_strided(
22116 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22117 TensorOptions options, DimnameList names
22118 ) override {
22119 outputs_[output_idx] = create_out(sizes, strides, options);
22120 if (!names.empty()) {
22121 namedinference::propagate_names(*outputs_[output_idx], names);
22122 }
22123 // super must happen after, so that downstream can use maybe_get_output
22124 // to retrieve the output
22125 at::native::structured_hardsigmoid_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22126 }
22127 void set_output_raw_strided(
22128 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22129 TensorOptions options, DimnameList names
22130 ) override {
22131 outputs_[output_idx] = create_out(sizes, strides, options);
22132 if (!names.empty()) {
22133 namedinference::propagate_names(*outputs_[output_idx], names);
22134 }
22135 // super must happen after, so that downstream can use maybe_get_output
22136 // to retrieve the output
22137 at::native::structured_hardsigmoid_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22138 }
22139 const Tensor& maybe_get_output(int64_t output_idx) override {
22140 return *outputs_[output_idx];
22141 }
22142 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
22143};
22144at::Tensor wrapper_CPU_hardsigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self) {
22145structured_hardsigmoid_backward_out_functional op;
22146op.meta(grad_output, self);
22147op.impl(grad_output, self, *op.outputs_[0]);
22148return std::move(op.outputs_[0]).take();
22149}
22150struct structured_hardsigmoid_backward_out_out final : public at::native::structured_hardsigmoid_backward_out {
22151 structured_hardsigmoid_backward_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
22152 void set_output_strided(
22153 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22154 TensorOptions options, DimnameList names
22155 ) override {
22156 const auto& out = outputs_[output_idx].get();
22157 resize_out(out, sizes, strides, options);
22158 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
22159 if (C10_UNLIKELY(maybe_proxy.has_value())) {
22160 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
22161 }
22162 if (!names.empty()) {
22163 namedinference::propagate_names(outputs_[output_idx], names);
22164 }
22165 // super must happen after, so that downstream can use maybe_get_output
22166 // to retrieve the output
22167 at::native::structured_hardsigmoid_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22168 }
22169 void set_output_raw_strided(
22170 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22171 TensorOptions options, DimnameList names
22172 ) override {
22173 const auto& out = outputs_[output_idx].get();
22174 resize_out(out, sizes, strides, options);
22175 if (!names.empty()) {
22176 namedinference::propagate_names(outputs_[output_idx], names);
22177 }
22178 // super must happen after, so that downstream can use maybe_get_output
22179 // to retrieve the output
22180 at::native::structured_hardsigmoid_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22181 }
22182 const Tensor& maybe_get_output(int64_t output_idx) override {
22183 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
22184 }
22185 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
22186 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
22187};
22188at::Tensor & wrapper_CPU_hardsigmoid_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) {
22189structured_hardsigmoid_backward_out_out op(grad_input);
22190op.meta(grad_output, self);
22191op.impl(grad_output, self, op.maybe_get_output(0));
22192if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
22193return grad_input;
22194}
22195namespace {
22196at::Tensor wrapper_CPU__hardtanh(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
22197 // No device check
22198 // DeviceGuard omitted
22199 return at::native::hardtanh(self, min_val, max_val);
22200}
22201} // anonymous namespace
22202namespace {
22203at::Tensor & wrapper_CPU_out_hardtanh_out(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out) {
22204 // No device check
22205 // DeviceGuard omitted
22206 return at::native::hardtanh_out(self, min_val, max_val, out);
22207}
22208} // anonymous namespace
22209namespace {
22210at::Tensor & wrapper_CPU__hardtanh_(at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
22211 // No device check
22212 // DeviceGuard omitted
22213 return at::native::hardtanh_(self, min_val, max_val);
22214}
22215} // anonymous namespace
22216namespace {
22217at::Tensor wrapper_CPU__hardtanh_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
22218 // No device check
22219 // DeviceGuard omitted
22220 return at::native::hardtanh_backward(grad_output, self, min_val, max_val);
22221}
22222} // anonymous namespace
22223namespace {
22224at::Tensor & wrapper_CPU_grad_input_hardtanh_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & grad_input) {
22225 // No device check
22226 // DeviceGuard omitted
22227 return at::native::hardtanh_backward_out(grad_output, self, min_val, max_val, grad_input);
22228}
22229} // anonymous namespace
22230namespace {
22231at::Tensor wrapper_CPU__hardswish(const at::Tensor & self) {
22232 // No device check
22233 // DeviceGuard omitted
22234 return at::native::hardswish(self);
22235}
22236} // anonymous namespace
22237namespace {
22238at::Tensor & wrapper_CPU_out_hardswish_out(const at::Tensor & self, at::Tensor & out) {
22239 // No device check
22240 // DeviceGuard omitted
22241 return at::native::hardswish_out(self, out);
22242}
22243} // anonymous namespace
22244namespace {
22245at::Tensor & wrapper_CPU__hardswish_(at::Tensor & self) {
22246 // No device check
22247 // DeviceGuard omitted
22248 return at::native::hardswish_(self);
22249}
22250} // anonymous namespace
22251namespace {
22252at::Tensor wrapper_CPU__hardswish_backward(const at::Tensor & grad_output, const at::Tensor & self) {
22253 // No device check
22254 // DeviceGuard omitted
22255 return at::native::hardswish_backward(grad_output, self);
22256}
22257} // anonymous namespace
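// NOTE: hardtanh and hardswish (and their out/in-place/backward variants above) are
// registered through the plain unstructured wrappers, in contrast to the structured
// functional/out/inplace triples used for elu, hardsigmoid, and leaky_relu in this file.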
22258struct structured_leaky_relu_out_functional final : public at::native::structured_leaky_relu_out {
22259 void set_output_strided(
22260 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22261 TensorOptions options, DimnameList names
22262 ) override {
22263 outputs_[output_idx] = create_out(sizes, strides, options);
22264 if (!names.empty()) {
22265 namedinference::propagate_names(*outputs_[output_idx], names);
22266 }
22267 // super must happen after, so that downstream can use maybe_get_output
22268 // to retrieve the output
22269 at::native::structured_leaky_relu_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22270 }
22271 void set_output_raw_strided(
22272 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22273 TensorOptions options, DimnameList names
22274 ) override {
22275 outputs_[output_idx] = create_out(sizes, strides, options);
22276 if (!names.empty()) {
22277 namedinference::propagate_names(*outputs_[output_idx], names);
22278 }
22279 // super must happen after, so that downstream can use maybe_get_output
22280 // to retrieve the output
22281 at::native::structured_leaky_relu_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22282 }
22283 const Tensor& maybe_get_output(int64_t output_idx) override {
22284 return *outputs_[output_idx];
22285 }
22286 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
22287};
22288at::Tensor wrapper_CPU_leaky_relu(const at::Tensor & self, const at::Scalar & negative_slope) {
22289structured_leaky_relu_out_functional op;
22290op.meta(self, negative_slope);
22291op.impl(self, negative_slope, *op.outputs_[0]);
22292return std::move(op.outputs_[0]).take();
22293}
22294struct structured_leaky_relu_out_out final : public at::native::structured_leaky_relu_out {
22295 structured_leaky_relu_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
22296 void set_output_strided(
22297 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22298 TensorOptions options, DimnameList names
22299 ) override {
22300 const auto& out = outputs_[output_idx].get();
22301 resize_out(out, sizes, strides, options);
22302 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
22303 if (C10_UNLIKELY(maybe_proxy.has_value())) {
22304 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
22305 }
22306 if (!names.empty()) {
22307 namedinference::propagate_names(outputs_[output_idx], names);
22308 }
22309 // super must happen after, so that downstream can use maybe_get_output
22310 // to retrieve the output
22311 at::native::structured_leaky_relu_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22312 }
22313 void set_output_raw_strided(
22314 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22315 TensorOptions options, DimnameList names
22316 ) override {
22317 const auto& out = outputs_[output_idx].get();
22318 resize_out(out, sizes, strides, options);
22319 if (!names.empty()) {
22320 namedinference::propagate_names(outputs_[output_idx], names);
22321 }
22322 // super must happen after, so that downstream can use maybe_get_output
22323 // to retrieve the output
22324 at::native::structured_leaky_relu_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22325 }
22326 const Tensor& maybe_get_output(int64_t output_idx) override {
22327 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
22328 }
22329 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
22330 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
22331};
22332at::Tensor & wrapper_CPU_leaky_relu_out_out(const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out) {
22333structured_leaky_relu_out_out op(out);
22334op.meta(self, negative_slope);
22335op.impl(self, negative_slope, op.maybe_get_output(0));
22336if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
22337return out;
22338}
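// The _out flavour keeps a reference to the caller's tensor instead of
// allocating one: resize_out() reshapes it to the computed size, and
// maybe_create_proxy() substitutes a temporary only when the supplied tensor
// does not already have the exact strides the kernel wants, in which case the
// proxy is copied back after impl() runs. Sketch (illustrative only):
//
//   at::Tensor x = at::randn({4, 4});
//   at::Tensor out = at::empty_like(x);
//   at::leaky_relu_out(out, x, /*negative_slope=*/0.2);
//   // fills `out` in place via wrapper_CPU_leaky_relu_out_out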
22339struct structured_leaky_relu_out_inplace final : public at::native::structured_leaky_relu_out {
22340 structured_leaky_relu_out_inplace(Tensor& self) : outputs_{std::ref(self)} {}
22341 void set_output_strided(
22342 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22343 TensorOptions options, DimnameList names
22344 ) override {
22345 const auto& out = outputs_[output_idx].get();
22346 check_inplace(out, sizes, options);
22347 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
22348 if (C10_UNLIKELY(maybe_proxy.has_value())) {
22349 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
22350 }
22351 if (!names.empty()) {
22352 namedinference::propagate_names(outputs_[output_idx], names);
22353 }
22354 // super must happen after, so that downstream can use maybe_get_output
22355 // to retrieve the output
22356 at::native::structured_leaky_relu_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22357 }
22358 void set_output_raw_strided(
22359 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22360 TensorOptions options, DimnameList names
22361 ) override {
22362 const auto& out = outputs_[output_idx].get();
22363 check_inplace(out, sizes, options);
22364 if (!names.empty()) {
22365 namedinference::propagate_names(outputs_[output_idx], names);
22366 }
22367 // super must happen after, so that downstream can use maybe_get_output
22368 // to retrieve the output
22369 at::native::structured_leaky_relu_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22370 }
22371 const Tensor& maybe_get_output(int64_t output_idx) override {
22372 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
22373 }
22374 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
22375 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
22376};
22377at::Tensor & wrapper_CPU_leaky_relu_(at::Tensor & self, const at::Scalar & negative_slope) {
22378structured_leaky_relu_out_inplace op(self);
22379op.meta(self, negative_slope);
22380op.impl(self, negative_slope, op.outputs_[0]);
22381if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
22382return self;
22383}
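// The in-place flavour reuses `self` as the single output: check_inplace()
// only verifies that `self` already has the expected sizes and options
// (in-place structured kernels never resize their input), and any proxy that
// was needed is copied back into `self` before returning it. Sketch:
//
//   at::Tensor x = at::randn({4, 4});
//   at::leaky_relu_(x, /*negative_slope=*/0.1);   // same as x.leaky_relu_(0.1)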
22384struct structured_leaky_relu_backward_out_functional final : public at::native::structured_leaky_relu_backward_out {
22385 void set_output_strided(
22386 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22387 TensorOptions options, DimnameList names
22388 ) override {
22389 outputs_[output_idx] = create_out(sizes, strides, options);
22390 if (!names.empty()) {
22391 namedinference::propagate_names(*outputs_[output_idx], names);
22392 }
22393 // super must happen after, so that downstream can use maybe_get_output
22394 // to retrieve the output
22395 at::native::structured_leaky_relu_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22396 }
22397 void set_output_raw_strided(
22398 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22399 TensorOptions options, DimnameList names
22400 ) override {
22401 outputs_[output_idx] = create_out(sizes, strides, options);
22402 if (!names.empty()) {
22403 namedinference::propagate_names(*outputs_[output_idx], names);
22404 }
22405 // super must happen after, so that downstream can use maybe_get_output
22406 // to retrieve the output
22407 at::native::structured_leaky_relu_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22408 }
22409 const Tensor& maybe_get_output(int64_t output_idx) override {
22410 return *outputs_[output_idx];
22411 }
22412 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
22413};
22414at::Tensor wrapper_CPU_leaky_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) {
22415structured_leaky_relu_backward_out_functional op;
22416op.meta(grad_output, self, negative_slope, self_is_result);
22417op.impl(grad_output, self, negative_slope, self_is_result, *op.outputs_[0]);
22418return std::move(op.outputs_[0]).take();
22419}
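// Backward kernels follow the same functional pattern; they are normally
// reached through autograd when leaky_relu participates in a backward pass,
// but the op is also callable directly (illustrative only):
//
//   at::Tensor x = at::randn({4, 4});
//   at::Tensor grad = at::ones_like(x);
//   at::Tensor gx = at::leaky_relu_backward(grad, x, /*negative_slope=*/0.01,
//                                           /*self_is_result=*/false);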
22420struct structured_leaky_relu_backward_out_out final : public at::native::structured_leaky_relu_backward_out {
22421 structured_leaky_relu_backward_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
22422 void set_output_strided(
22423 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22424 TensorOptions options, DimnameList names
22425 ) override {
22426 const auto& out = outputs_[output_idx].get();
22427 resize_out(out, sizes, strides, options);
22428 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
22429 if (C10_UNLIKELY(maybe_proxy.has_value())) {
22430 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
22431 }
22432 if (!names.empty()) {
22433 namedinference::propagate_names(outputs_[output_idx], names);
22434 }
22435 // super must happen after, so that downstream can use maybe_get_output
22436 // to retrieve the output
22437 at::native::structured_leaky_relu_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22438 }
22439 void set_output_raw_strided(
22440 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22441 TensorOptions options, DimnameList names
22442 ) override {
22443 const auto& out = outputs_[output_idx].get();
22444 resize_out(out, sizes, strides, options);
22445 if (!names.empty()) {
22446 namedinference::propagate_names(outputs_[output_idx], names);
22447 }
22448 // super must happen after, so that downstream can use maybe_get_output
22449 // to retrieve the output
22450 at::native::structured_leaky_relu_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22451 }
22452 const Tensor& maybe_get_output(int64_t output_idx) override {
22453 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
22454 }
22455 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
22456 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
22457};
22458at::Tensor & wrapper_CPU_leaky_relu_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input) {
22459structured_leaky_relu_backward_out_out op(grad_input);
22460op.meta(grad_output, self, negative_slope, self_is_result);
22461op.impl(grad_output, self, negative_slope, self_is_result, op.maybe_get_output(0));
22462if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
22463return grad_input;
22464}
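// The .grad_input overload is the same _out machinery with the destination
// conventionally named grad_input. Sketch (illustrative only):
//
//   at::Tensor x = at::randn({4, 4});
//   at::Tensor grad = at::ones_like(x);
//   at::Tensor gi = at::empty_like(x);
//   at::leaky_relu_backward_out(gi, grad, x, /*negative_slope=*/0.01,
//                               /*self_is_result=*/false);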
22465namespace {
22466::std::tuple<at::Tensor,at::Tensor> wrapper_CPU__log_sigmoid_forward(const at::Tensor & self) {
22467 // No device check
22468 // DeviceGuard omitted
22469 return at::native::log_sigmoid_forward_cpu(self);
22470}
22471} // anonymous namespace
22472namespace {
22473::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_output_log_sigmoid_forward_out(const at::Tensor & self, at::Tensor & output, at::Tensor & buffer) {
22474 // No device check
22475 // DeviceGuard omitted
22476 return at::native::log_sigmoid_forward_out_cpu(self, output, buffer);
22477}
22478} // anonymous namespace
22479namespace {
22480at::Tensor wrapper_CPU__log_sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) {
22481 // No device check
22482 // DeviceGuard omitted
22483 return at::native::log_sigmoid_backward_cpu(grad_output, self, buffer);
22484}
22485} // anonymous namespace
22486namespace {
22487at::Tensor & wrapper_CPU_grad_input_log_sigmoid_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input) {
22488 // No device check
22489 // DeviceGuard omitted
22490 return at::native::log_sigmoid_backward_cpu_out(grad_output, self, buffer, grad_input);
22491}
22492} // anonymous namespace
22493namespace {
22494at::Tensor wrapper_CPU__rrelu_with_noise(const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {
22495 // No device check
22496 // DeviceGuard omitted
22497 return at::native::rrelu_with_noise_cpu(self, noise, lower, upper, training, generator);
22498}
22499} // anonymous namespace
22500namespace {
22501at::Tensor & wrapper_CPU_out_rrelu_with_noise_out(const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator, at::Tensor & out) {
22502 // No device check
22503 // DeviceGuard omitted
22504 return at::native::rrelu_with_noise_out_cpu(self, noise, lower, upper, training, generator, out);
22505}
22506} // anonymous namespace
22507namespace {
22508at::Tensor & wrapper_CPU__rrelu_with_noise_(at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {
22509 // No device check
22510 // DeviceGuard omitted
22511 return at::native::rrelu_with_noise_cpu_(self, noise, lower, upper, training, generator);
22512}
22513} // anonymous namespace
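// log_sigmoid_forward and rrelu_with_noise are registered with plain
// unstructured wrappers as well; they show two more signature shapes handled
// in this file: a two-tensor return (output plus the scratch buffer the CPU
// backward consumes) and an optional at::Generator argument for randomized
// ops. Sketch (illustrative only):
//
//   at::Tensor x = at::randn({6});
//   auto [output, buffer] = at::log_sigmoid_forward(x);
//   at::Tensor noise = at::empty_like(x);
//   at::Tensor y = at::rrelu_with_noise(x, noise, /*lower=*/0.125,
//                                       /*upper=*/0.3333, /*training=*/true);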
22514struct structured_softplus_out_functional final : public at::native::structured_softplus_out {
22515 void set_output_strided(
22516 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22517 TensorOptions options, DimnameList names
22518 ) override {
22519 outputs_[output_idx] = create_out(sizes, strides, options);
22520 if (!names.empty()) {
22521 namedinference::propagate_names(*outputs_[output_idx], names);
22522 }
22523 // super must happen after, so that downstream can use maybe_get_output
22524 // to retrieve the output
22525 at::native::structured_softplus_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22526 }
22527 void set_output_raw_strided(
22528 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22529 TensorOptions options, DimnameList names
22530 ) override {
22531 outputs_[output_idx] = create_out(sizes, strides, options);
22532 if (!names.empty()) {
22533 namedinference::propagate_names(*outputs_[output_idx], names);
22534 }
22535 // super must happen after, so that downstream can use maybe_get_output
22536 // to retrieve the output
22537 at::native::structured_softplus_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22538 }
22539 const Tensor& maybe_get_output(int64_t output_idx) override {
22540 return *outputs_[output_idx];
22541 }
22542 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
22543};
22544at::Tensor wrapper_CPU_softplus(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
22545structured_softplus_out_functional op;
22546op.meta(self, beta, threshold);
22547op.impl(self, beta, threshold, *op.outputs_[0]);
22548return std::move(op.outputs_[0]).take();
22549}
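// softplus repeats the functional structured pattern: meta() is responsible
// for both shape checking and output allocation, so by the time impl() runs,
// outputs_[0] already holds a correctly sized CPU tensor. Sketch:
//
//   at::Tensor x = at::randn({10});
//   at::Tensor y = at::softplus(x, /*beta=*/1.0, /*threshold=*/20.0);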
22550struct structured_softplus_out_out final : public at::native::structured_softplus_out {
22551 structured_softplus_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
22552 void set_output_strided(
22553 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22554 TensorOptions options, DimnameList names
22555 ) override {
22556 const auto& out = outputs_[output_idx].get();
22557 resize_out(out, sizes, strides, options);
22558 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
22559 if (C10_UNLIKELY(maybe_proxy.has_value())) {
22560 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
22561 }
22562 if (!names.empty()) {
22563 namedinference::propagate_names(outputs_[output_idx], names);
22564 }
22565 // super must happen after, so that downstream can use maybe_get_output
22566 // to retrieve the output
22567 at::native::structured_softplus_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22568 }
22569 void set_output_raw_strided(
22570 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22571 TensorOptions options, DimnameList names
22572 ) override {
22573 const auto& out = outputs_[output_idx].get();
22574 resize_out(out, sizes, strides, options);
22575 if (!names.empty()) {
22576 namedinference::propagate_names(outputs_[output_idx], names);
22577 }
22578 // super must happen after, so that downstream can use maybe_get_output
22579 // to retrieve the output
22580 at::native::structured_softplus_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22581 }
22582 const Tensor& maybe_get_output(int64_t output_idx) override {
22583 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
22584 }
22585 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
22586 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
22587};
22588at::Tensor & wrapper_CPU_softplus_out_out(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & out) {
22589structured_softplus_out_out op(out);
22590op.meta(self, beta, threshold);
22591op.impl(self, beta, threshold, op.maybe_get_output(0));
22592if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
22593return out;
22594}
22595struct structured_softplus_backward_out_functional final : public at::native::structured_softplus_backward_out {
22596 void set_output_strided(
22597 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22598 TensorOptions options, DimnameList names
22599 ) override {
22600 outputs_[output_idx] = create_out(sizes, strides, options);
22601 if (!names.empty()) {
22602 namedinference::propagate_names(*outputs_[output_idx], names);
22603 }
22604 // super must happen after, so that downstream can use maybe_get_output
22605 // to retrieve the output
22606 at::native::structured_softplus_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22607 }
22608 void set_output_raw_strided(
22609 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22610 TensorOptions options, DimnameList names
22611 ) override {
22612 outputs_[output_idx] = create_out(sizes, strides, options);
22613 if (!names.empty()) {
22614 namedinference::propagate_names(*outputs_[output_idx], names);
22615 }
22616 // super must happen after, so that downstream can use maybe_get_output
22617 // to retrieve the output
22618 at::native::structured_softplus_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22619 }
22620 const Tensor& maybe_get_output(int64_t output_idx) override {
22621 return *outputs_[output_idx];
22622 }
22623 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
22624};
22625at::Tensor wrapper_CPU_softplus_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
22626structured_softplus_backward_out_functional op;
22627op.meta(grad_output, self, beta, threshold);
22628op.impl(grad_output, self, beta, threshold, *op.outputs_[0]);
22629return std::move(op.outputs_[0]).take();
22630}
22631struct structured_softplus_backward_out_out final : public at::native::structured_softplus_backward_out {
22632 structured_softplus_backward_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
22633 void set_output_strided(
22634 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22635 TensorOptions options, DimnameList names
22636 ) override {
22637 const auto& out = outputs_[output_idx].get();
22638 resize_out(out, sizes, strides, options);
22639 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
22640 if (C10_UNLIKELY(maybe_proxy.has_value())) {
22641 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
22642 }
22643 if (!names.empty()) {
22644 namedinference::propagate_names(outputs_[output_idx], names);
22645 }
22646 // super must happen after, so that downstream can use maybe_get_output
22647 // to retrieve the output
22648 at::native::structured_softplus_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22649 }
22650 void set_output_raw_strided(
22651 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22652 TensorOptions options, DimnameList names
22653 ) override {
22654 const auto& out = outputs_[output_idx].get();
22655 resize_out(out, sizes, strides, options);
22656 if (!names.empty()) {
22657 namedinference::propagate_names(outputs_[output_idx], names);
22658 }
22659 // super must happen after, so that downstream can use maybe_get_output
22660 // to retrieve the output
22661 at::native::structured_softplus_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22662 }
22663 const Tensor& maybe_get_output(int64_t output_idx) override {
22664 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
22665 }
22666 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
22667 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
22668};
22669at::Tensor & wrapper_CPU_softplus_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & grad_input) {
22670structured_softplus_backward_out_out op(grad_input);
22671op.meta(grad_output, self, beta, threshold);
22672op.impl(grad_output, self, beta, threshold, op.maybe_get_output(0));
22673if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
22674return grad_input;
22675}
22676struct structured_softshrink_out_functional final : public at::native::structured_softshrink_out {
22677 void set_output_strided(
22678 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22679 TensorOptions options, DimnameList names
22680 ) override {
22681 outputs_[output_idx] = create_out(sizes, strides, options);
22682 if (!names.empty()) {
22683 namedinference::propagate_names(*outputs_[output_idx], names);
22684 }
22685 // super must happen after, so that downstream can use maybe_get_output
22686 // to retrieve the output
22687 at::native::structured_softshrink_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22688 }
22689 void set_output_raw_strided(
22690 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22691 TensorOptions options, DimnameList names
22692 ) override {
22693 outputs_[output_idx] = create_out(sizes, strides, options);
22694 if (!names.empty()) {
22695 namedinference::propagate_names(*outputs_[output_idx], names);
22696 }
22697 // super must happen after, so that downstream can use maybe_get_output
22698 // to retrieve the output
22699 at::native::structured_softshrink_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22700 }
22701 const Tensor& maybe_get_output(int64_t output_idx) override {
22702 return *outputs_[output_idx];
22703 }
22704 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
22705};
22706at::Tensor wrapper_CPU_softshrink(const at::Tensor & self, const at::Scalar & lambd) {
22707structured_softshrink_out_functional op;
22708op.meta(self, lambd);
22709op.impl(self, lambd, *op.outputs_[0]);
22710return std::move(op.outputs_[0]).take();
22711}
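// softshrink is another instance of the same generated pattern; only the
// meta()/impl() pair and the argument list change from operator to operator.
// Sketch:
//
//   at::Tensor x = at::randn({10});
//   at::Tensor y = at::softshrink(x, /*lambd=*/0.5);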
22712struct structured_softshrink_out_out final : public at::native::structured_softshrink_out {
22713 structured_softshrink_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
22714 void set_output_strided(
22715 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22716 TensorOptions options, DimnameList names
22717 ) override {
22718 const auto& out = outputs_[output_idx].get();
22719 resize_out(out, sizes, strides, options);
22720 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
22721 if (C10_UNLIKELY(maybe_proxy.has_value())) {
22722 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
22723 }
22724 if (!names.empty()) {
22725 namedinference::propagate_names(outputs_[output_idx], names);
22726 }
22727 // super must happen after, so that downstream can use maybe_get_output
22728 // to retrieve the output
22729 at::native::structured_softshrink_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22730 }
22731 void set_output_raw_strided(
22732 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22733 TensorOptions options, DimnameList names
22734 ) override {
22735 const auto& out = outputs_[output_idx].get();
22736 resize_out(out, sizes, strides, options);
22737 if (!names.empty()) {
22738 namedinference::propagate_names(outputs_[output_idx], names);
22739 }
22740 // super must happen after, so that downstream can use maybe_get_output
22741 // to retrieve the output
22742 at::native::structured_softshrink_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22743 }
22744 const Tensor& maybe_get_output(int64_t output_idx) override {
22745 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
22746 }
22747 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
22748 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
22749};
22750at::Tensor & wrapper_CPU_softshrink_out_out(const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out) {
22751structured_softshrink_out_out op(out);
22752op.meta(self, lambd);
22753op.impl(self, lambd, op.maybe_get_output(0));
22754if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
22755return out;
22756}
22757struct structured_softshrink_backward_out_functional final : public at::native::structured_softshrink_backward_out {
22758 void set_output_strided(
22759 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22760 TensorOptions options, DimnameList names
22761 ) override {
22762 outputs_[output_idx] = create_out(sizes, strides, options);
22763 if (!names.empty()) {
22764 namedinference::propagate_names(*outputs_[output_idx], names);
22765 }
22766 // super must happen after, so that downstream can use maybe_get_output
22767 // to retrieve the output
22768 at::native::structured_softshrink_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22769 }
22770 void set_output_raw_strided(
22771 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22772 TensorOptions options, DimnameList names
22773 ) override {
22774 outputs_[output_idx] = create_out(sizes, strides, options);
22775 if (!names.empty()) {
22776 namedinference::propagate_names(*outputs_[output_idx], names);
22777 }
22778 // super must happen after, so that downstream can use maybe_get_output
22779 // to retrieve the output
22780 at::native::structured_softshrink_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22781 }
22782 const Tensor& maybe_get_output(int64_t output_idx) override {
22783 return *outputs_[output_idx];
22784 }
22785 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
22786};
22787at::Tensor wrapper_CPU_softshrink_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd) {
22788structured_softshrink_backward_out_functional op;
22789op.meta(grad_output, self, lambd);
22790op.impl(grad_output, self, lambd, *op.outputs_[0]);
22791return std::move(op.outputs_[0]).take();
22792}
22793struct structured_softshrink_backward_out_out final : public at::native::structured_softshrink_backward_out {
22794 structured_softshrink_backward_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
22795 void set_output_strided(
22796 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22797 TensorOptions options, DimnameList names
22798 ) override {
22799 const auto& out = outputs_[output_idx].get();
22800 resize_out(out, sizes, strides, options);
22801 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
22802 if (C10_UNLIKELY(maybe_proxy.has_value())) {
22803 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
22804 }
22805 if (!names.empty()) {
22806 namedinference::propagate_names(outputs_[output_idx], names);
22807 }
22808 // super must happen after, so that downstream can use maybe_get_output
22809 // to retrieve the output
22810 at::native::structured_softshrink_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22811 }
22812 void set_output_raw_strided(
22813 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22814 TensorOptions options, DimnameList names
22815 ) override {
22816 const auto& out = outputs_[output_idx].get();
22817 resize_out(out, sizes, strides, options);
22818 if (!names.empty()) {
22819 namedinference::propagate_names(outputs_[output_idx], names);
22820 }
22821 // super must happen after, so that downstream can use maybe_get_output
22822 // to retrieve the output
22823 at::native::structured_softshrink_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
22824 }
22825 const Tensor& maybe_get_output(int64_t output_idx) override {
22826 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
22827 }
22828 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
22829 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
22830};
22831at::Tensor & wrapper_CPU_softshrink_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input) {
22832structured_softshrink_backward_out_out op(grad_input);
22833op.meta(grad_output, self, lambd);
22834op.impl(grad_output, self, lambd, op.maybe_get_output(0));
22835if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
22836return grad_input;
22837}
22838namespace {
22839at::Tensor & wrapper_CPU_out_adaptive_avg_pool2d_out(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
22840 // No device check
22841 // DeviceGuard omitted
22842 return at::native::adaptive_avg_pool2d_out_cpu(self, C10_AS_INTARRAYREF_SLOW(output_size), out);
22843}
22844} // anonymous namespace
22845namespace {
22846at::Tensor wrapper_CPU___adaptive_avg_pool2d(const at::Tensor & self, c10::SymIntArrayRef output_size) {
22847 // No device check
22848 // DeviceGuard omitted
22849 return at::native::adaptive_avg_pool2d_cpu(self, C10_AS_INTARRAYREF_SLOW(output_size));
22850}
22851} // anonymous namespace
22852namespace {
22853at::Tensor wrapper_CPU___adaptive_avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self) {
22854 // No device check
22855 // DeviceGuard omitted
22856 return at::native::adaptive_avg_pool2d_backward_cpu(grad_output, self);
22857}
22858} // anonymous namespace
22859namespace {
22860at::Tensor & wrapper_CPU_out_adaptive_avg_pool3d_out(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
22861 // No device check
22862 // DeviceGuard omitted
22863 return at::native::adaptive_avg_pool3d_out_cpu(self, C10_AS_INTARRAYREF_SLOW(output_size), out);
22864}
22865} // anonymous namespace
22866namespace {
22867at::Tensor wrapper_CPU___adaptive_avg_pool3d(const at::Tensor & self, c10::SymIntArrayRef output_size) {
22868 // No device check
22869 // DeviceGuard omitted
22870 return at::native::adaptive_avg_pool3d_cpu(self, C10_AS_INTARRAYREF_SLOW(output_size));
22871}
22872} // anonymous namespace
22873namespace {
22874at::Tensor & wrapper_CPU_grad_input_adaptive_avg_pool3d_backward_out(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) {
22875 // No device check
22876 // DeviceGuard omitted
22877 return at::native::adaptive_avg_pool3d_backward_out_cpu(grad_output, self, grad_input);
22878}
22879} // anonymous namespace
22880namespace {
22881at::Tensor wrapper_CPU___adaptive_avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self) {
22882 // No device check
22883 // DeviceGuard omitted
22884 return at::native::adaptive_avg_pool3d_backward_cpu(grad_output, self);
22885}
22886} // anonymous namespace
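// The adaptive average pooling wrappers take c10::SymIntArrayRef so the same
// schema can carry symbolic shapes; the CPU native kernels want concrete
// integers, so C10_AS_INTARRAYREF_SLOW materializes them before the call.
// Sketch (the public adaptive_avg_pool2d composite typically lands here for
// ordinary CPU tensors):
//
//   at::Tensor x = at::randn({1, 3, 32, 32});
//   at::Tensor y = at::adaptive_avg_pool2d(x, {7, 7});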
22887struct structured_adaptive_max_pool2d_out_cpu_functional final : public at::native::structured_adaptive_max_pool2d_out_cpu {
22888 void set_output_strided(
22889 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22890 TensorOptions options, DimnameList names
22891 ) override {
22892 outputs_[output_idx] = create_out(sizes, strides, options);
22893 if (!names.empty()) {
22894 namedinference::propagate_names(*outputs_[output_idx], names);
22895 }
22896 // super must happen after, so that downstream can use maybe_get_output
22897 // to retrieve the output
22898 }
22899 void set_output_raw_strided(
22900 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22901 TensorOptions options, DimnameList names
22902 ) override {
22903 outputs_[output_idx] = create_out(sizes, strides, options);
22904 if (!names.empty()) {
22905 namedinference::propagate_names(*outputs_[output_idx], names);
22906 }
22907 // super must happen after, so that downstream can use maybe_get_output
22908 // to retrieve the output
22909 }
22910 const Tensor& maybe_get_output(int64_t output_idx) override {
22911 return *outputs_[output_idx];
22912 }
22913 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
22914};
22915::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_adaptive_max_pool2d(const at::Tensor & self, at::IntArrayRef output_size) {
22916structured_adaptive_max_pool2d_out_cpu_functional op;
22917op.meta(self, output_size);
22918op.impl(self, output_size, *op.outputs_[0], *op.outputs_[1]);
22919return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
22920}
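// adaptive_max_pool2d is a structured kernel with two outputs (values and
// indices), so outputs_ has two ExclusivelyOwned slots and the functional
// wrapper returns them as a std::tuple. Sketch (illustrative only):
//
//   at::Tensor x = at::randn({1, 3, 16, 16});
//   auto [vals, idx] = at::adaptive_max_pool2d(x, {4, 4});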
22921struct structured_adaptive_max_pool2d_out_cpu_out final : public at::native::structured_adaptive_max_pool2d_out_cpu {
22922 structured_adaptive_max_pool2d_out_cpu_out(Tensor& out0, Tensor& out1) : outputs_{ std::ref(out0), std::ref(out1) } {}
22923 void set_output_strided(
22924 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22925 TensorOptions options, DimnameList names
22926 ) override {
22927 const auto& out = outputs_[output_idx].get();
22928 resize_out(out, sizes, strides, options);
22929 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
22930 if (C10_UNLIKELY(maybe_proxy.has_value())) {
22931 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
22932 }
22933 if (!names.empty()) {
22934 namedinference::propagate_names(outputs_[output_idx], names);
22935 }
22936 // super must happen after, so that downstream can use maybe_get_output
22937 // to retrieve the output
22938 }
22939 void set_output_raw_strided(
22940 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22941 TensorOptions options, DimnameList names
22942 ) override {
22943 const auto& out = outputs_[output_idx].get();
22944 resize_out(out, sizes, strides, options);
22945 if (!names.empty()) {
22946 namedinference::propagate_names(outputs_[output_idx], names);
22947 }
22948 // super must happen after, so that downstream can use maybe_get_output
22949 // to retrieve the output
22950 }
22951 const Tensor& maybe_get_output(int64_t output_idx) override {
22952 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
22953 }
22954 std::array<std::reference_wrapper<Tensor>, 2> outputs_;
22955 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 2> proxy_outputs_;
22956};
22957::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_adaptive_max_pool2d_out_out(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices) {
22958structured_adaptive_max_pool2d_out_cpu_out op(out, indices);
22959op.meta(self, output_size);
22960op.impl(self, output_size, op.maybe_get_output(0), op.maybe_get_output(1));
22961if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
22962if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(**op.proxy_outputs_[1]);
22963return std::forward_as_tuple(out, indices);
22964}
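// The _out flavour of the two-output kernel accepts both destinations from the
// caller, applies the resize/proxy logic to each of them independently, and
// returns them as references via std::forward_as_tuple. Sketch (illustrative
// only):
//
//   at::Tensor x = at::randn({1, 3, 16, 16});
//   at::Tensor out = at::empty({0});
//   at::Tensor idx = at::empty({0}, at::kLong);
//   at::adaptive_max_pool2d_out(out, idx, x, {4, 4});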
22965struct structured_adaptive_max_pool2d_backward_out_cpu_functional final : public at::native::structured_adaptive_max_pool2d_backward_out_cpu {
22966 void set_output_strided(
22967 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22968 TensorOptions options, DimnameList names
22969 ) override {
22970 outputs_[output_idx] = create_out(sizes, strides, options);
22971 if (!names.empty()) {
22972 namedinference::propagate_names(*outputs_[output_idx], names);
22973 }
22974 // super must happen after, so that downstream can use maybe_get_output
22975 // to retrieve the output
22976 }
22977 void set_output_raw_strided(
22978 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
22979 TensorOptions options, DimnameList names
22980 ) override {
22981 outputs_[output_idx] = create_out(sizes, strides, options);
22982 if (!names.empty()) {
22983 namedinference::propagate_names(*outputs_[output_idx], names);
22984 }
22985 // super must happen after, so that downstream can use maybe_get_output
22986 // to retrieve the output
22987 }
22988 const Tensor& maybe_get_output(int64_t output_idx) override {
22989 return *outputs_[output_idx];
22990 }
22991 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
22992};
22993at::Tensor wrapper_CPU_adaptive_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
22994structured_adaptive_max_pool2d_backward_out_cpu_functional op;
22995op.meta(grad_output, self, indices);
22996op.impl(grad_output, self, indices, *op.outputs_[0]);
22997return std::move(op.outputs_[0]).take();
22998}
22999struct structured_adaptive_max_pool2d_backward_out_cpu_out final : public at::native::structured_adaptive_max_pool2d_backward_out_cpu {
23000 structured_adaptive_max_pool2d_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
23001 void set_output_strided(
23002 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23003 TensorOptions options, DimnameList names
23004 ) override {
23005 const auto& out = outputs_[output_idx].get();
23006 resize_out(out, sizes, strides, options);
23007 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
23008 if (C10_UNLIKELY(maybe_proxy.has_value())) {
23009 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
23010 }
23011 if (!names.empty()) {
23012 namedinference::propagate_names(outputs_[output_idx], names);
23013 }
23014 // super must happen after, so that downstream can use maybe_get_output
23015 // to retrieve the output
23016 }
23017 void set_output_raw_strided(
23018 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23019 TensorOptions options, DimnameList names
23020 ) override {
23021 const auto& out = outputs_[output_idx].get();
23022 resize_out(out, sizes, strides, options);
23023 if (!names.empty()) {
23024 namedinference::propagate_names(outputs_[output_idx], names);
23025 }
23026 // super must happen after, so that downstream can use maybe_get_output
23027 // to retrieve the output
23028 }
23029 const Tensor& maybe_get_output(int64_t output_idx) override {
23030 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
23031 }
23032 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
23033 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
23034};
23035at::Tensor & wrapper_CPU_adaptive_max_pool2d_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input) {
23036structured_adaptive_max_pool2d_backward_out_cpu_out op(grad_input);
23037op.meta(grad_output, self, indices);
23038op.impl(grad_output, self, indices, op.maybe_get_output(0));
23039if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
23040return grad_input;
23041}
23042struct structured_adaptive_max_pool3d_out_cpu_functional final : public at::native::structured_adaptive_max_pool3d_out_cpu {
23043 void set_output_strided(
23044 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23045 TensorOptions options, DimnameList names
23046 ) override {
23047 outputs_[output_idx] = create_out(sizes, strides, options);
23048 if (!names.empty()) {
23049 namedinference::propagate_names(*outputs_[output_idx], names);
23050 }
23051 // super must happen after, so that downstream can use maybe_get_output
23052 // to retrieve the output
23053 }
23054 void set_output_raw_strided(
23055 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23056 TensorOptions options, DimnameList names
23057 ) override {
23058 outputs_[output_idx] = create_out(sizes, strides, options);
23059 if (!names.empty()) {
23060 namedinference::propagate_names(*outputs_[output_idx], names);
23061 }
23062 // super must happen after, so that downstream can use maybe_get_output
23063 // to retrieve the output
23064 }
23065 const Tensor& maybe_get_output(int64_t output_idx) override {
23066 return *outputs_[output_idx];
23067 }
23068 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
23069};
23070::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_adaptive_max_pool3d(const at::Tensor & self, at::IntArrayRef output_size) {
23071structured_adaptive_max_pool3d_out_cpu_functional op;
23072op.meta(self, output_size);
23073op.impl(self, output_size, *op.outputs_[0], *op.outputs_[1]);
23074return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
23075}
23076struct structured_adaptive_max_pool3d_out_cpu_out final : public at::native::structured_adaptive_max_pool3d_out_cpu {
23077 structured_adaptive_max_pool3d_out_cpu_out(Tensor& out0, Tensor& out1) : outputs_{ std::ref(out0), std::ref(out1) } {}
23078 void set_output_strided(
23079 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23080 TensorOptions options, DimnameList names
23081 ) override {
23082 const auto& out = outputs_[output_idx].get();
23083 resize_out(out, sizes, strides, options);
23084 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
23085 if (C10_UNLIKELY(maybe_proxy.has_value())) {
23086 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
23087 }
23088 if (!names.empty()) {
23089 namedinference::propagate_names(outputs_[output_idx], names);
23090 }
23091 // super must happen after, so that downstream can use maybe_get_output
23092 // to retrieve the output
23093 }
23094 void set_output_raw_strided(
23095 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23096 TensorOptions options, DimnameList names
23097 ) override {
23098 const auto& out = outputs_[output_idx].get();
23099 resize_out(out, sizes, strides, options);
23100 if (!names.empty()) {
23101 namedinference::propagate_names(outputs_[output_idx], names);
23102 }
23103 // super must happen after, so that downstream can use maybe_get_output
23104 // to retrieve the output
23105 }
23106 const Tensor& maybe_get_output(int64_t output_idx) override {
23107 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
23108 }
23109 std::array<std::reference_wrapper<Tensor>, 2> outputs_;
23110 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 2> proxy_outputs_;
23111};
23112::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_adaptive_max_pool3d_out_out(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices) {
23113structured_adaptive_max_pool3d_out_cpu_out op(out, indices);
23114op.meta(self, output_size);
23115op.impl(self, output_size, op.maybe_get_output(0), op.maybe_get_output(1));
23116if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
23117if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(**op.proxy_outputs_[1]);
23118return std::forward_as_tuple(out, indices);
23119}
23120struct structured_adaptive_max_pool3d_backward_out_cpu_functional final : public at::native::structured_adaptive_max_pool3d_backward_out_cpu {
23121 void set_output_strided(
23122 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23123 TensorOptions options, DimnameList names
23124 ) override {
23125 outputs_[output_idx] = create_out(sizes, strides, options);
23126 if (!names.empty()) {
23127 namedinference::propagate_names(*outputs_[output_idx], names);
23128 }
23129 // super must happen after, so that downstream can use maybe_get_output
23130 // to retrieve the output
23131 }
23132 void set_output_raw_strided(
23133 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23134 TensorOptions options, DimnameList names
23135 ) override {
23136 outputs_[output_idx] = create_out(sizes, strides, options);
23137 if (!names.empty()) {
23138 namedinference::propagate_names(*outputs_[output_idx], names);
23139 }
23140 // super must happen after, so that downstream can use maybe_get_output
23141 // to retrieve the output
23142 }
23143 const Tensor& maybe_get_output(int64_t output_idx) override {
23144 return *outputs_[output_idx];
23145 }
23146 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
23147};
23148at::Tensor wrapper_CPU_adaptive_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
23149structured_adaptive_max_pool3d_backward_out_cpu_functional op;
23150op.meta(grad_output, self, indices);
23151op.impl(grad_output, self, indices, *op.outputs_[0]);
23152return std::move(op.outputs_[0]).take();
23153}
23154struct structured_adaptive_max_pool3d_backward_out_cpu_out final : public at::native::structured_adaptive_max_pool3d_backward_out_cpu {
23155 structured_adaptive_max_pool3d_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
23156 void set_output_strided(
23157 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23158 TensorOptions options, DimnameList names
23159 ) override {
23160 const auto& out = outputs_[output_idx].get();
23161 resize_out(out, sizes, strides, options);
23162 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
23163 if (C10_UNLIKELY(maybe_proxy.has_value())) {
23164 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
23165 }
23166 if (!names.empty()) {
23167 namedinference::propagate_names(outputs_[output_idx], names);
23168 }
23169 // super must happen after, so that downstream can use maybe_get_output
23170 // to retrieve the output
23171 }
23172 void set_output_raw_strided(
23173 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23174 TensorOptions options, DimnameList names
23175 ) override {
23176 const auto& out = outputs_[output_idx].get();
23177 resize_out(out, sizes, strides, options);
23178 if (!names.empty()) {
23179 namedinference::propagate_names(outputs_[output_idx], names);
23180 }
23181 // super must happen after, so that downstream can use maybe_get_output
23182 // to retrieve the output
23183 }
23184 const Tensor& maybe_get_output(int64_t output_idx) override {
23185 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
23186 }
23187 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
23188 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
23189};
23190at::Tensor & wrapper_CPU_adaptive_max_pool3d_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input) {
23191structured_adaptive_max_pool3d_backward_out_cpu_out op(grad_input);
23192op.meta(grad_output, self, indices);
23193op.impl(grad_output, self, indices, op.maybe_get_output(0));
23194if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
23195return grad_input;
23196}
23197struct structured_avg_pool2d_out_cpu_functional final : public at::native::structured_avg_pool2d_out_cpu {
23198 void set_output_strided(
23199 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23200 TensorOptions options, DimnameList names
23201 ) override {
23202 outputs_[output_idx] = create_out(sizes, strides, options);
23203 if (!names.empty()) {
23204 namedinference::propagate_names(*outputs_[output_idx], names);
23205 }
23206 // super must happen after, so that downstream can use maybe_get_output
23207 // to retrieve the output
23208 }
23209 void set_output_raw_strided(
23210 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23211 TensorOptions options, DimnameList names
23212 ) override {
23213 outputs_[output_idx] = create_out(sizes, strides, options);
23214 if (!names.empty()) {
23215 namedinference::propagate_names(*outputs_[output_idx], names);
23216 }
23217 // super must happen after, so that downstream can use maybe_get_output
23218 // to retrieve the output
23219 }
23220 const Tensor& maybe_get_output(int64_t output_idx) override {
23221 return *outputs_[output_idx];
23222 }
23223 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
23224};
23225at::Tensor wrapper_CPU_avg_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
23226structured_avg_pool2d_out_cpu_functional op;
23227auto precompute = op.meta(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
23228(void)precompute;
23229op.impl(self, precompute.kH, precompute.kW, precompute.dH, precompute.dW, precompute.padH, precompute.padW, ceil_mode, count_include_pad, divisor_override, *op.outputs_[0]);
23230return std::move(op.outputs_[0]).take();
23231}
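// avg_pool2d uses the "precomputed" structured-kernel variant: meta() returns
// a struct of values it already had to derive (kernel, stride and padding
// extents), and impl() consumes those fields (precompute.kH, precompute.kW,
// precompute.dH, precompute.dW, precompute.padH, precompute.padW) instead of
// re-parsing the raw IntArrayRef arguments. Sketch:
//
//   at::Tensor x = at::randn({1, 3, 8, 8});
//   at::Tensor y = at::avg_pool2d(x, /*kernel_size=*/{2, 2});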
23232struct structured_avg_pool2d_out_cpu_out final : public at::native::structured_avg_pool2d_out_cpu {
23233 structured_avg_pool2d_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
23234 void set_output_strided(
23235 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23236 TensorOptions options, DimnameList names
23237 ) override {
23238 const auto& out = outputs_[output_idx].get();
23239 resize_out(out, sizes, strides, options);
23240 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
23241 if (C10_UNLIKELY(maybe_proxy.has_value())) {
23242 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
23243 }
23244 if (!names.empty()) {
23245 namedinference::propagate_names(outputs_[output_idx], names);
23246 }
23247 // super must happen after, so that downstream can use maybe_get_output
23248 // to retrieve the output
23249 }
23250 void set_output_raw_strided(
23251 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23252 TensorOptions options, DimnameList names
23253 ) override {
23254 const auto& out = outputs_[output_idx].get();
23255 resize_out(out, sizes, strides, options);
23256 if (!names.empty()) {
23257 namedinference::propagate_names(outputs_[output_idx], names);
23258 }
23259 // super must happen after, so that downstream can use maybe_get_output
23260 // to retrieve the output
23261 }
23262 const Tensor& maybe_get_output(int64_t output_idx) override {
23263 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
23264 }
23265 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
23266 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
23267};
23268at::Tensor & wrapper_CPU_avg_pool2d_out_out(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & out) {
23269structured_avg_pool2d_out_cpu_out op(out);
23270auto precompute = op.meta(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
23271(void)precompute;
23272op.impl(self, precompute.kH, precompute.kW, precompute.dH, precompute.dW, precompute.padH, precompute.padW, ceil_mode, count_include_pad, divisor_override, op.maybe_get_output(0));
23273if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
23274return out;
23275}
23276struct structured_avg_pool2d_backward_out_cpu_functional final : public at::native::structured_avg_pool2d_backward_out_cpu {
23277 void set_output_strided(
23278 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23279 TensorOptions options, DimnameList names
23280 ) override {
23281 outputs_[output_idx] = create_out(sizes, strides, options);
23282 if (!names.empty()) {
23283 namedinference::propagate_names(*outputs_[output_idx], names);
23284 }
23285 // super must happen after, so that downstream can use maybe_get_output
23286 // to retrieve the output
23287 }
23288 void set_output_raw_strided(
23289 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23290 TensorOptions options, DimnameList names
23291 ) override {
23292 outputs_[output_idx] = create_out(sizes, strides, options);
23293 if (!names.empty()) {
23294 namedinference::propagate_names(*outputs_[output_idx], names);
23295 }
23296 // super must happen after, so that downstream can use maybe_get_output
23297 // to retrieve the output
23298 }
23299 const Tensor& maybe_get_output(int64_t output_idx) override {
23300 return *outputs_[output_idx];
23301 }
23302 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
23303};
23304at::Tensor wrapper_CPU_avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
23305structured_avg_pool2d_backward_out_cpu_functional op;
23306op.meta(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
23307op.impl(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, *op.outputs_[0]);
23308return std::move(op.outputs_[0]).take();
23309}
23310struct structured_avg_pool2d_backward_out_cpu_out final : public at::native::structured_avg_pool2d_backward_out_cpu {
23311 structured_avg_pool2d_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
23312 void set_output_strided(
23313 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23314 TensorOptions options, DimnameList names
23315 ) override {
23316 const auto& out = outputs_[output_idx].get();
23317 resize_out(out, sizes, strides, options);
23318 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
23319 if (C10_UNLIKELY(maybe_proxy.has_value())) {
23320 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
23321 }
23322 if (!names.empty()) {
23323 namedinference::propagate_names(outputs_[output_idx], names);
23324 }
23325 // super must happen after, so that downstream can use maybe_get_output
23326 // to retrieve the output
23327 }
23328 void set_output_raw_strided(
23329 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23330 TensorOptions options, DimnameList names
23331 ) override {
23332 const auto& out = outputs_[output_idx].get();
23333 resize_out(out, sizes, strides, options);
23334 if (!names.empty()) {
23335 namedinference::propagate_names(outputs_[output_idx], names);
23336 }
23337 // super must happen after, so that downstream can use maybe_get_output
23338 // to retrieve the output
23339 }
23340 const Tensor& maybe_get_output(int64_t output_idx) override {
23341 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
23342 }
23343 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
23344 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
23345};
23346at::Tensor & wrapper_CPU_avg_pool2d_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & grad_input) {
23347structured_avg_pool2d_backward_out_cpu_out op(grad_input);
23348op.meta(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
23349op.impl(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, op.maybe_get_output(0));
23350if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
23351return grad_input;
23352}
23353struct structured_avg_pool3d_out_cpu_functional final : public at::native::structured_avg_pool3d_out_cpu {
23354 void set_output_strided(
23355 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23356 TensorOptions options, DimnameList names
23357 ) override {
23358 outputs_[output_idx] = create_out(sizes, strides, options);
23359 if (!names.empty()) {
23360 namedinference::propagate_names(*outputs_[output_idx], names);
23361 }
23362 // super must happen after, so that downstream can use maybe_get_output
23363 // to retrieve the output
23364 }
23365 void set_output_raw_strided(
23366 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23367 TensorOptions options, DimnameList names
23368 ) override {
23369 outputs_[output_idx] = create_out(sizes, strides, options);
23370 if (!names.empty()) {
23371 namedinference::propagate_names(*outputs_[output_idx], names);
23372 }
23373 // super must happen after, so that downstream can use maybe_get_output
23374 // to retrieve the output
23375 }
23376 const Tensor& maybe_get_output(int64_t output_idx) override {
23377 return *outputs_[output_idx];
23378 }
23379 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
23380};
23381at::Tensor wrapper_CPU_avg_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
23382structured_avg_pool3d_out_cpu_functional op;
23383op.meta(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
23384op.impl(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, *op.outputs_[0]);
23385return std::move(op.outputs_[0]).take();
23386}
23387struct structured_avg_pool3d_out_cpu_out final : public at::native::structured_avg_pool3d_out_cpu {
23388 structured_avg_pool3d_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
23389 void set_output_strided(
23390 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23391 TensorOptions options, DimnameList names
23392 ) override {
23393 const auto& out = outputs_[output_idx].get();
23394 resize_out(out, sizes, strides, options);
23395 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
23396 if (C10_UNLIKELY(maybe_proxy.has_value())) {
23397 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
23398 }
23399 if (!names.empty()) {
23400 namedinference::propagate_names(outputs_[output_idx], names);
23401 }
23402 // super must happen after, so that downstream can use maybe_get_output
23403 // to retrieve the output
23404 }
23405 void set_output_raw_strided(
23406 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23407 TensorOptions options, DimnameList names
23408 ) override {
23409 const auto& out = outputs_[output_idx].get();
23410 resize_out(out, sizes, strides, options);
23411 if (!names.empty()) {
23412 namedinference::propagate_names(outputs_[output_idx], names);
23413 }
23414 // super must happen after, so that downstream can use maybe_get_output
23415 // to retrieve the output
23416 }
23417 const Tensor& maybe_get_output(int64_t output_idx) override {
23418 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
23419 }
23420 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
23421 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
23422};
23423at::Tensor & wrapper_CPU_avg_pool3d_out_out(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & out) {
23424structured_avg_pool3d_out_cpu_out op(out);
23425op.meta(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
23426op.impl(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, op.maybe_get_output(0));
23427if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
23428return out;
23429}
23430struct structured_avg_pool3d_backward_out_cpu_functional final : public at::native::structured_avg_pool3d_backward_out_cpu {
23431 void set_output_strided(
23432 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23433 TensorOptions options, DimnameList names
23434 ) override {
23435 outputs_[output_idx] = create_out(sizes, strides, options);
23436 if (!names.empty()) {
23437 namedinference::propagate_names(*outputs_[output_idx], names);
23438 }
23439 // super must happen after, so that downstream can use maybe_get_output
23440 // to retrieve the output
23441 }
23442 void set_output_raw_strided(
23443 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23444 TensorOptions options, DimnameList names
23445 ) override {
23446 outputs_[output_idx] = create_out(sizes, strides, options);
23447 if (!names.empty()) {
23448 namedinference::propagate_names(*outputs_[output_idx], names);
23449 }
23450 // super must happen after, so that downstream can use maybe_get_output
23451 // to retrieve the output
23452 }
23453 const Tensor& maybe_get_output(int64_t output_idx) override {
23454 return *outputs_[output_idx];
23455 }
23456 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
23457};
23458at::Tensor wrapper_CPU_avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
23459structured_avg_pool3d_backward_out_cpu_functional op;
23460op.meta(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
23461op.impl(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, *op.outputs_[0]);
23462return std::move(op.outputs_[0]).take();
23463}
23464struct structured_avg_pool3d_backward_out_cpu_out final : public at::native::structured_avg_pool3d_backward_out_cpu {
23465 structured_avg_pool3d_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
23466 void set_output_strided(
23467 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23468 TensorOptions options, DimnameList names
23469 ) override {
23470 const auto& out = outputs_[output_idx].get();
23471 resize_out(out, sizes, strides, options);
23472 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
23473 if (C10_UNLIKELY(maybe_proxy.has_value())) {
23474 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
23475 }
23476 if (!names.empty()) {
23477 namedinference::propagate_names(outputs_[output_idx], names);
23478 }
23479 // super must happen after, so that downstream can use maybe_get_output
23480 // to retrieve the output
23481 }
23482 void set_output_raw_strided(
23483 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23484 TensorOptions options, DimnameList names
23485 ) override {
23486 const auto& out = outputs_[output_idx].get();
23487 resize_out(out, sizes, strides, options);
23488 if (!names.empty()) {
23489 namedinference::propagate_names(outputs_[output_idx], names);
23490 }
23491 // super must happen after, so that downstream can use maybe_get_output
23492 // to retrieve the output
23493 }
23494 const Tensor& maybe_get_output(int64_t output_idx) override {
23495 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
23496 }
23497 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
23498 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
23499};
23500at::Tensor & wrapper_CPU_avg_pool3d_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & grad_input) {
23501structured_avg_pool3d_backward_out_cpu_out op(grad_input);
23502op.meta(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
23503op.impl(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, op.maybe_get_output(0));
23504if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
23505return grad_input;
23506}
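// fractional_max_pool2d is a two-output structured kernel: the functional and
// out= helpers below size their outputs_/proxy_outputs_ arrays to 2, and the
// wrappers return both the pooled values and the indices as a tuple.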
23507struct structured_fractional_max_pool2d_out_cpu_functional final : public at::native::structured_fractional_max_pool2d_out_cpu {
23508 void set_output_strided(
23509 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23510 TensorOptions options, DimnameList names
23511 ) override {
23512 outputs_[output_idx] = create_out(sizes, strides, options);
23513 if (!names.empty()) {
23514 namedinference::propagate_names(*outputs_[output_idx], names);
23515 }
23516 // super must happen after, so that downstream can use maybe_get_output
23517 // to retrieve the output
23518 }
23519 void set_output_raw_strided(
23520 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23521 TensorOptions options, DimnameList names
23522 ) override {
23523 outputs_[output_idx] = create_out(sizes, strides, options);
23524 if (!names.empty()) {
23525 namedinference::propagate_names(*outputs_[output_idx], names);
23526 }
23527 // super must happen after, so that downstream can use maybe_get_output
23528 // to retrieve the output
23529 }
23530 const Tensor& maybe_get_output(int64_t output_idx) override {
23531 return *outputs_[output_idx];
23532 }
23533 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
23534};
23535::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_fractional_max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
23536structured_fractional_max_pool2d_out_cpu_functional op;
23537op.meta(self, kernel_size, output_size, random_samples);
23538op.impl(self, kernel_size, output_size, random_samples, *op.outputs_[0], *op.outputs_[1]);
23539return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
23540}
23541struct structured_fractional_max_pool2d_out_cpu_out final : public at::native::structured_fractional_max_pool2d_out_cpu {
23542 structured_fractional_max_pool2d_out_cpu_out(Tensor& out0, Tensor& out1) : outputs_{ std::ref(out0), std::ref(out1) } {}
23543 void set_output_strided(
23544 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23545 TensorOptions options, DimnameList names
23546 ) override {
23547 const auto& out = outputs_[output_idx].get();
23548 resize_out(out, sizes, strides, options);
23549 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
23550 if (C10_UNLIKELY(maybe_proxy.has_value())) {
23551 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
23552 }
23553 if (!names.empty()) {
23554 namedinference::propagate_names(outputs_[output_idx], names);
23555 }
23556 // super must happen after, so that downstream can use maybe_get_output
23557 // to retrieve the output
23558 }
23559 void set_output_raw_strided(
23560 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23561 TensorOptions options, DimnameList names
23562 ) override {
23563 const auto& out = outputs_[output_idx].get();
23564 resize_out(out, sizes, strides, options);
23565 if (!names.empty()) {
23566 namedinference::propagate_names(outputs_[output_idx], names);
23567 }
23568 // super must happen after, so that downstream can use maybe_get_output
23569 // to retrieve the output
23570 }
23571 const Tensor& maybe_get_output(int64_t output_idx) override {
23572 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
23573 }
23574 std::array<std::reference_wrapper<Tensor>, 2> outputs_;
23575 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 2> proxy_outputs_;
23576};
23577::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_fractional_max_pool2d_out_output(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices) {
23578structured_fractional_max_pool2d_out_cpu_out op(output, indices);
23579op.meta(self, kernel_size, output_size, random_samples);
23580op.impl(self, kernel_size, output_size, random_samples, op.maybe_get_output(0), op.maybe_get_output(1));
23581if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
23582if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(**op.proxy_outputs_[1]);
23583return std::forward_as_tuple(output, indices);
23584}
23585struct structured_fractional_max_pool2d_backward_cpu_functional final : public at::native::structured_fractional_max_pool2d_backward_cpu {
23586 void set_output_strided(
23587 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23588 TensorOptions options, DimnameList names
23589 ) override {
23590 outputs_[output_idx] = create_out(sizes, strides, options);
23591 if (!names.empty()) {
23592 namedinference::propagate_names(*outputs_[output_idx], names);
23593 }
23594 // super must happen after, so that downstream can use maybe_get_output
23595 // to retrieve the output
23596 }
23597 void set_output_raw_strided(
23598 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23599 TensorOptions options, DimnameList names
23600 ) override {
23601 outputs_[output_idx] = create_out(sizes, strides, options);
23602 if (!names.empty()) {
23603 namedinference::propagate_names(*outputs_[output_idx], names);
23604 }
23605 // super must happen after, so that downstream can use maybe_get_output
23606 // to retrieve the output
23607 }
23608 const Tensor& maybe_get_output(int64_t output_idx) override {
23609 return *outputs_[output_idx];
23610 }
23611 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
23612};
23613at::Tensor wrapper_CPU_fractional_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
23614structured_fractional_max_pool2d_backward_cpu_functional op;
23615op.meta(grad_output, self, kernel_size, output_size, indices);
23616op.impl(grad_output, self, kernel_size, output_size, indices, *op.outputs_[0]);
23617return std::move(op.outputs_[0]).take();
23618}
23619struct structured_fractional_max_pool2d_backward_cpu_out final : public at::native::structured_fractional_max_pool2d_backward_cpu {
23620 structured_fractional_max_pool2d_backward_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
23621 void set_output_strided(
23622 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23623 TensorOptions options, DimnameList names
23624 ) override {
23625 const auto& out = outputs_[output_idx].get();
23626 resize_out(out, sizes, strides, options);
23627 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
23628 if (C10_UNLIKELY(maybe_proxy.has_value())) {
23629 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
23630 }
23631 if (!names.empty()) {
23632 namedinference::propagate_names(outputs_[output_idx], names);
23633 }
23634 // super must happen after, so that downstream can use maybe_get_output
23635 // to retrieve the output
23636 }
23637 void set_output_raw_strided(
23638 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23639 TensorOptions options, DimnameList names
23640 ) override {
23641 const auto& out = outputs_[output_idx].get();
23642 resize_out(out, sizes, strides, options);
23643 if (!names.empty()) {
23644 namedinference::propagate_names(outputs_[output_idx], names);
23645 }
23646 // super must happen after, so that downstream can use maybe_get_output
23647 // to retrieve the output
23648 }
23649 const Tensor& maybe_get_output(int64_t output_idx) override {
23650 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
23651 }
23652 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
23653 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
23654};
23655at::Tensor & wrapper_CPU_fractional_max_pool2d_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input) {
23656structured_fractional_max_pool2d_backward_cpu_out op(grad_input);
23657op.meta(grad_output, self, kernel_size, output_size, indices);
23658op.impl(grad_output, self, kernel_size, output_size, indices, op.maybe_get_output(0));
23659if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
23660return grad_input;
23661}
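// fractional_max_pool3d has a meta() that returns a precompute struct; the
// wrappers below forward its fields (pool sizes, output/input extents, batch
// and plane counts) to impl() in place of the raw kernel_size/output_size
// arguments.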
23662struct structured_fractional_max_pool3d_out_cpu_functional final : public at::native::structured_fractional_max_pool3d_out_cpu {
23663 void set_output_strided(
23664 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23665 TensorOptions options, DimnameList names
23666 ) override {
23667 outputs_[output_idx] = create_out(sizes, strides, options);
23668 if (!names.empty()) {
23669 namedinference::propagate_names(*outputs_[output_idx], names);
23670 }
23671 // super must happen after, so that downstream can use maybe_get_output
23672 // to retrieve the output
23673 }
23674 void set_output_raw_strided(
23675 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23676 TensorOptions options, DimnameList names
23677 ) override {
23678 outputs_[output_idx] = create_out(sizes, strides, options);
23679 if (!names.empty()) {
23680 namedinference::propagate_names(*outputs_[output_idx], names);
23681 }
23682 // super must happen after, so that downstream can use maybe_get_output
23683 // to retrieve the output
23684 }
23685 const Tensor& maybe_get_output(int64_t output_idx) override {
23686 return *outputs_[output_idx];
23687 }
23688 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
23689};
23690::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_fractional_max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
23691structured_fractional_max_pool3d_out_cpu_functional op;
23692auto precompute = op.meta(self, kernel_size, output_size, random_samples);
23693(void)precompute;
23694op.impl(self, precompute.poolSizeT, precompute.poolSizeH, precompute.poolSizeW, precompute.outputT, precompute.outputH, precompute.outputW, random_samples, precompute.numBatch, precompute.numPlanes, precompute.inputT, precompute.inputH, precompute.inputW, *op.outputs_[0], *op.outputs_[1]);
23695return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
23696}
23697struct structured_fractional_max_pool3d_out_cpu_out final : public at::native::structured_fractional_max_pool3d_out_cpu {
23698 structured_fractional_max_pool3d_out_cpu_out(Tensor& out0, Tensor& out1) : outputs_{ std::ref(out0), std::ref(out1) } {}
23699 void set_output_strided(
23700 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23701 TensorOptions options, DimnameList names
23702 ) override {
23703 const auto& out = outputs_[output_idx].get();
23704 resize_out(out, sizes, strides, options);
23705 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
23706 if (C10_UNLIKELY(maybe_proxy.has_value())) {
23707 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
23708 }
23709 if (!names.empty()) {
23710 namedinference::propagate_names(outputs_[output_idx], names);
23711 }
23712 // super must happen after, so that downstream can use maybe_get_output
23713 // to retrieve the output
23714 }
23715 void set_output_raw_strided(
23716 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23717 TensorOptions options, DimnameList names
23718 ) override {
23719 const auto& out = outputs_[output_idx].get();
23720 resize_out(out, sizes, strides, options);
23721 if (!names.empty()) {
23722 namedinference::propagate_names(outputs_[output_idx], names);
23723 }
23724 // super must happen after, so that downstream can use maybe_get_output
23725 // to retrieve the output
23726 }
23727 const Tensor& maybe_get_output(int64_t output_idx) override {
23728 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
23729 }
23730 std::array<std::reference_wrapper<Tensor>, 2> outputs_;
23731 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 2> proxy_outputs_;
23732};
23733::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_fractional_max_pool3d_out_output(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices) {
23734structured_fractional_max_pool3d_out_cpu_out op(output, indices);
23735auto precompute = op.meta(self, kernel_size, output_size, random_samples);
23736(void)precompute;
23737op.impl(self, precompute.poolSizeT, precompute.poolSizeH, precompute.poolSizeW, precompute.outputT, precompute.outputH, precompute.outputW, random_samples, precompute.numBatch, precompute.numPlanes, precompute.inputT, precompute.inputH, precompute.inputW, op.maybe_get_output(0), op.maybe_get_output(1));
23738if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
23739if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(**op.proxy_outputs_[1]);
23740return std::forward_as_tuple(output, indices);
23741}
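// The wrappers in the anonymous namespaces below are for unstructured ops:
// each one forwards directly to the corresponding at::native CPU
// implementation, with no device check and no DeviceGuard, as noted in each
// body.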
23742namespace {
23743at::Tensor wrapper_CPU__fractional_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
23744 // No device check
23745 // DeviceGuard omitted
23746 return at::native::fractional_max_pool3d_backward_cpu(grad_output, self, kernel_size, output_size, indices);
23747}
23748} // anonymous namespace
23749namespace {
23750at::Tensor & wrapper_CPU_grad_input_fractional_max_pool3d_backward_out(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input) {
23751 // No device check
23752 // DeviceGuard omitted
23753 return at::native::fractional_max_pool3d_backward_out_cpu(grad_output, self, kernel_size, output_size, indices, grad_input);
23754}
23755} // anonymous namespace
23756struct structured_max_pool2d_with_indices_out_cpu_functional final : public at::native::structured_max_pool2d_with_indices_out_cpu {
23757 void set_output_strided(
23758 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23759 TensorOptions options, DimnameList names
23760 ) override {
23761 outputs_[output_idx] = create_out(sizes, strides, options);
23762 if (!names.empty()) {
23763 namedinference::propagate_names(*outputs_[output_idx], names);
23764 }
23765 // super must happen after, so that downstream can use maybe_get_output
23766 // to retrieve the output
23767 }
23768 void set_output_raw_strided(
23769 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23770 TensorOptions options, DimnameList names
23771 ) override {
23772 outputs_[output_idx] = create_out(sizes, strides, options);
23773 if (!names.empty()) {
23774 namedinference::propagate_names(*outputs_[output_idx], names);
23775 }
23776 // super must happen after, so that downstream can use maybe_get_output
23777 // to retrieve the output
23778 }
23779 const Tensor& maybe_get_output(int64_t output_idx) override {
23780 return *outputs_[output_idx];
23781 }
23782 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
23783};
23784::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_max_pool2d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
23785structured_max_pool2d_with_indices_out_cpu_functional op;
23786op.meta(self, kernel_size, stride, padding, dilation, ceil_mode);
23787op.impl(self, kernel_size, stride, padding, dilation, ceil_mode, *op.outputs_[0], *op.outputs_[1]);
23788return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
23789}
23790struct structured_max_pool2d_with_indices_out_cpu_out final : public at::native::structured_max_pool2d_with_indices_out_cpu {
23791 structured_max_pool2d_with_indices_out_cpu_out(Tensor& out0, Tensor& out1) : outputs_{ std::ref(out0), std::ref(out1) } {}
23792 void set_output_strided(
23793 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23794 TensorOptions options, DimnameList names
23795 ) override {
23796 const auto& out = outputs_[output_idx].get();
23797 resize_out(out, sizes, strides, options);
23798 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
23799 if (C10_UNLIKELY(maybe_proxy.has_value())) {
23800 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
23801 }
23802 if (!names.empty()) {
23803 namedinference::propagate_names(outputs_[output_idx], names);
23804 }
23805 // super must happen after, so that downstream can use maybe_get_output
23806 // to retrieve the output
23807 }
23808 void set_output_raw_strided(
23809 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23810 TensorOptions options, DimnameList names
23811 ) override {
23812 const auto& out = outputs_[output_idx].get();
23813 resize_out(out, sizes, strides, options);
23814 if (!names.empty()) {
23815 namedinference::propagate_names(outputs_[output_idx], names);
23816 }
23817 // super must happen after, so that downstream can use maybe_get_output
23818 // to retrieve the output
23819 }
23820 const Tensor& maybe_get_output(int64_t output_idx) override {
23821 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
23822 }
23823 std::array<std::reference_wrapper<Tensor>, 2> outputs_;
23824 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 2> proxy_outputs_;
23825};
23826::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_max_pool2d_with_indices_out_out(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices) {
23827structured_max_pool2d_with_indices_out_cpu_out op(out, indices);
23828op.meta(self, kernel_size, stride, padding, dilation, ceil_mode);
23829op.impl(self, kernel_size, stride, padding, dilation, ceil_mode, op.maybe_get_output(0), op.maybe_get_output(1));
23830if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
23831if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(**op.proxy_outputs_[1]);
23832return std::forward_as_tuple(out, indices);
23833}
23834struct structured_max_pool2d_with_indices_backward_out_cpu_functional final : public at::native::structured_max_pool2d_with_indices_backward_out_cpu {
23835 void set_output_strided(
23836 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23837 TensorOptions options, DimnameList names
23838 ) override {
23839 outputs_[output_idx] = create_out(sizes, strides, options);
23840 if (!names.empty()) {
23841 namedinference::propagate_names(*outputs_[output_idx], names);
23842 }
23843 // super must happen after, so that downstream can use maybe_get_output
23844 // to retrieve the output
23845 }
23846 void set_output_raw_strided(
23847 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23848 TensorOptions options, DimnameList names
23849 ) override {
23850 outputs_[output_idx] = create_out(sizes, strides, options);
23851 if (!names.empty()) {
23852 namedinference::propagate_names(*outputs_[output_idx], names);
23853 }
23854 // super must happen after, so that downstream can use maybe_get_output
23855 // to retrieve the output
23856 }
23857 const Tensor& maybe_get_output(int64_t output_idx) override {
23858 return *outputs_[output_idx];
23859 }
23860 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
23861};
23862at::Tensor wrapper_CPU_max_pool2d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
23863structured_max_pool2d_with_indices_backward_out_cpu_functional op;
23864op.meta(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
23865op.impl(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, *op.outputs_[0]);
23866return std::move(op.outputs_[0]).take();
23867}
23868struct structured_max_pool2d_with_indices_backward_out_cpu_out final : public at::native::structured_max_pool2d_with_indices_backward_out_cpu {
23869 structured_max_pool2d_with_indices_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
23870 void set_output_strided(
23871 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23872 TensorOptions options, DimnameList names
23873 ) override {
23874 const auto& out = outputs_[output_idx].get();
23875 resize_out(out, sizes, strides, options);
23876 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
23877 if (C10_UNLIKELY(maybe_proxy.has_value())) {
23878 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
23879 }
23880 if (!names.empty()) {
23881 namedinference::propagate_names(outputs_[output_idx], names);
23882 }
23883 // super must happen after, so that downstream can use maybe_get_output
23884 // to retrieve the output
23885 }
23886 void set_output_raw_strided(
23887 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23888 TensorOptions options, DimnameList names
23889 ) override {
23890 const auto& out = outputs_[output_idx].get();
23891 resize_out(out, sizes, strides, options);
23892 if (!names.empty()) {
23893 namedinference::propagate_names(outputs_[output_idx], names);
23894 }
23895 // super must happen after, so that downstream can use maybe_get_output
23896 // to retrieve the output
23897 }
23898 const Tensor& maybe_get_output(int64_t output_idx) override {
23899 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
23900 }
23901 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
23902 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
23903};
23904at::Tensor & wrapper_CPU_max_pool2d_with_indices_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input) {
23905structured_max_pool2d_with_indices_backward_out_cpu_out op(grad_input);
23906op.meta(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
23907op.impl(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, op.maybe_get_output(0));
23908if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
23909return grad_input;
23910}
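// NOTE: none of the wrapper_CPU_* functions above are meant to be called
// directly; they are registered to the CPU dispatch key later in this
// generated file. A registration block for these ops typically looks like
//   TORCH_LIBRARY_IMPL(aten, CPU, m) {
//     m.impl("max_pool2d_with_indices", TORCH_FN(wrapper_CPU_max_pool2d_with_indices));
//     ...
//   }
// (illustrative sketch; see the registration section of this file for the
// actual entries).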
23911namespace {
23912::std::tuple<at::Tensor,at::Tensor> wrapper_CPU__max_pool3d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
23913 // No device check
23914 // DeviceGuard omitted
23915 return at::native::max_pool3d_with_indices_cpu(self, kernel_size, stride, padding, dilation, ceil_mode);
23916}
23917} // anonymous namespace
23918namespace {
23919::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_out_max_pool3d_with_indices_out(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices) {
23920 // No device check
23921 // DeviceGuard omitted
23922 return at::native::max_pool3d_with_indices_out_cpu(self, kernel_size, stride, padding, dilation, ceil_mode, out, indices);
23923}
23924} // anonymous namespace
23925namespace {
23926at::Tensor wrapper_CPU__max_pool3d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
23927 // No device check
23928 // DeviceGuard omitted
23929 return at::native::max_pool3d_with_indices_backward_cpu(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
23930}
23931} // anonymous namespace
23932namespace {
23933at::Tensor & wrapper_CPU_grad_input_max_pool3d_with_indices_backward_out(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input) {
23934 // No device check
23935 // DeviceGuard omitted
23936 return at::native::max_pool3d_with_indices_backward_out_cpu(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input);
23937}
23938} // anonymous namespace
23939namespace {
23940at::Tensor wrapper_CPU__max_unpool2d(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size) {
23941 // No device check
23942 // DeviceGuard omitted
23943 return at::native::max_unpooling2d_forward_cpu(self, indices, output_size);
23944}
23945} // anonymous namespace
23946namespace {
23947at::Tensor & wrapper_CPU_out_max_unpool2d_out(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::Tensor & out) {
23948 // No device check
23949 // DeviceGuard omitted
23950 return at::native::max_unpooling2d_forward_out_cpu(self, indices, output_size, out);
23951}
23952} // anonymous namespace
23953namespace {
23954at::Tensor wrapper_CPU__max_unpool3d(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) {
23955 // No device check
23956 // DeviceGuard omitted
23957 return at::native::max_unpooling3d_forward_cpu(self, indices, output_size, stride, padding);
23958}
23959} // anonymous namespace
23960namespace {
23961at::Tensor & wrapper_CPU_out_max_unpool3d_out(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) {
23962 // No device check
23963 // DeviceGuard omitted
23964 return at::native::max_unpooling3d_forward_out_cpu(self, indices, output_size, stride, padding, out);
23965}
23966} // anonymous namespace
23967struct structured_reflection_pad1d_out_cpu_functional final : public at::native::structured_reflection_pad1d_out_cpu {
23968 void set_output_strided(
23969 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23970 TensorOptions options, DimnameList names
23971 ) override {
23972 outputs_[output_idx] = create_out(sizes, strides, options);
23973 if (!names.empty()) {
23974 namedinference::propagate_names(*outputs_[output_idx], names);
23975 }
23976 // super must happen after, so that downstream can use maybe_get_output
23977 // to retrieve the output
23978 }
23979 void set_output_raw_strided(
23980 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
23981 TensorOptions options, DimnameList names
23982 ) override {
23983 outputs_[output_idx] = create_out(sizes, strides, options);
23984 if (!names.empty()) {
23985 namedinference::propagate_names(*outputs_[output_idx], names);
23986 }
23987 // super must happen after, so that downstream can use maybe_get_output
23988 // to retrieve the output
23989 }
23990 const Tensor& maybe_get_output(int64_t output_idx) override {
23991 return *outputs_[output_idx];
23992 }
23993 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
23994};
23995at::Tensor wrapper_CPU_reflection_pad1d(const at::Tensor & self, at::IntArrayRef padding) {
23996structured_reflection_pad1d_out_cpu_functional op;
23997op.meta(self, padding);
23998op.impl(self, padding, *op.outputs_[0]);
23999return std::move(op.outputs_[0]).take();
24000}
24001struct structured_reflection_pad1d_out_cpu_out final : public at::native::structured_reflection_pad1d_out_cpu {
24002 structured_reflection_pad1d_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
24003 void set_output_strided(
24004 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24005 TensorOptions options, DimnameList names
24006 ) override {
24007 const auto& out = outputs_[output_idx].get();
24008 resize_out(out, sizes, strides, options);
24009 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
24010 if (C10_UNLIKELY(maybe_proxy.has_value())) {
24011 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
24012 }
24013 if (!names.empty()) {
24014 namedinference::propagate_names(outputs_[output_idx], names);
24015 }
24016 // super must happen after, so that downstream can use maybe_get_output
24017 // to retrieve the output
24018 }
24019 void set_output_raw_strided(
24020 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24021 TensorOptions options, DimnameList names
24022 ) override {
24023 const auto& out = outputs_[output_idx].get();
24024 resize_out(out, sizes, strides, options);
24025 if (!names.empty()) {
24026 namedinference::propagate_names(outputs_[output_idx], names);
24027 }
24028 // super must happen after, so that downstream can use maybe_get_output
24029 // to retrieve the output
24030 }
24031 const Tensor& maybe_get_output(int64_t output_idx) override {
24032 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
24033 }
24034 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
24035 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
24036};
24037at::Tensor & wrapper_CPU_reflection_pad1d_out_out(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) {
24038structured_reflection_pad1d_out_cpu_out op(out);
24039op.meta(self, padding);
24040op.impl(self, padding, op.maybe_get_output(0));
24041if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
24042return out;
24043}
24044struct structured_reflection_pad1d_backward_out_cpu_functional final : public at::native::structured_reflection_pad1d_backward_out_cpu {
24045 void set_output_strided(
24046 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24047 TensorOptions options, DimnameList names
24048 ) override {
24049 outputs_[output_idx] = create_out(sizes, strides, options);
24050 if (!names.empty()) {
24051 namedinference::propagate_names(*outputs_[output_idx], names);
24052 }
24053 // super must happen after, so that downstream can use maybe_get_output
24054 // to retrieve the output
24055 }
24056 void set_output_raw_strided(
24057 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24058 TensorOptions options, DimnameList names
24059 ) override {
24060 outputs_[output_idx] = create_out(sizes, strides, options);
24061 if (!names.empty()) {
24062 namedinference::propagate_names(*outputs_[output_idx], names);
24063 }
24064 // super must happen after, so that downstream can use maybe_get_output
24065 // to retrieve the output
24066 }
24067 const Tensor& maybe_get_output(int64_t output_idx) override {
24068 return *outputs_[output_idx];
24069 }
24070 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
24071};
24072at::Tensor wrapper_CPU_reflection_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
24073structured_reflection_pad1d_backward_out_cpu_functional op;
24074op.meta(grad_output, self, padding);
24075op.impl(grad_output, self, padding, *op.outputs_[0]);
24076return std::move(op.outputs_[0]).take();
24077}
24078struct structured_reflection_pad1d_backward_out_cpu_out final : public at::native::structured_reflection_pad1d_backward_out_cpu {
24079 structured_reflection_pad1d_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
24080 void set_output_strided(
24081 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24082 TensorOptions options, DimnameList names
24083 ) override {
24084 const auto& out = outputs_[output_idx].get();
24085 resize_out(out, sizes, strides, options);
24086 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
24087 if (C10_UNLIKELY(maybe_proxy.has_value())) {
24088 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
24089 }
24090 if (!names.empty()) {
24091 namedinference::propagate_names(outputs_[output_idx], names);
24092 }
24093 // super must happen after, so that downstream can use maybe_get_output
24094 // to retrieve the output
24095 }
24096 void set_output_raw_strided(
24097 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24098 TensorOptions options, DimnameList names
24099 ) override {
24100 const auto& out = outputs_[output_idx].get();
24101 resize_out(out, sizes, strides, options);
24102 if (!names.empty()) {
24103 namedinference::propagate_names(outputs_[output_idx], names);
24104 }
24105 // super must happen after, so that downstream can use maybe_get_output
24106 // to retrieve the output
24107 }
24108 const Tensor& maybe_get_output(int64_t output_idx) override {
24109 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
24110 }
24111 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
24112 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
24113};
24114at::Tensor & wrapper_CPU_reflection_pad1d_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
24115structured_reflection_pad1d_backward_out_cpu_out op(grad_input);
24116op.meta(grad_output, self, padding);
24117op.impl(grad_output, self, padding, op.maybe_get_output(0));
24118if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
24119return grad_input;
24120}
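// reflection_pad2d receives its padding as c10::SymIntArrayRef; the wrappers
// below materialize the symbolic ints with C10_AS_INTARRAYREF_SLOW before
// calling the at::native CPU kernels, which only accept concrete IntArrayRef.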
24121namespace {
24122at::Tensor wrapper_CPU__reflection_pad2d(const at::Tensor & self, c10::SymIntArrayRef padding) {
24123 // No device check
24124 // DeviceGuard omitted
24125 return at::native::reflection_pad2d_cpu(self, C10_AS_INTARRAYREF_SLOW(padding));
24126}
24127} // anonymous namespace
24128namespace {
24129at::Tensor & wrapper_CPU_out_reflection_pad2d_out(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
24130 // No device check
24131 // DeviceGuard omitted
24132 return at::native::reflection_pad2d_out_cpu(self, C10_AS_INTARRAYREF_SLOW(padding), out);
24133}
24134} // anonymous namespace
24135namespace {
24136at::Tensor wrapper_CPU__reflection_pad2d_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
24137 // No device check
24138 // DeviceGuard omitted
24139 return at::native::reflection_pad2d_backward_cpu(grad_output, self, C10_AS_INTARRAYREF_SLOW(padding));
24140}
24141} // anonymous namespace
24142namespace {
24143at::Tensor & wrapper_CPU_grad_input_reflection_pad2d_backward_out(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
24144 // No device check
24145 // DeviceGuard omitted
24146 return at::native::reflection_pad2d_backward_out_cpu(grad_output, self, C10_AS_INTARRAYREF_SLOW(padding), grad_input);
24147}
24148} // anonymous namespace
24149struct structured_reflection_pad3d_out_cpu_functional final : public at::native::structured_reflection_pad3d_out_cpu {
24150 void set_output_strided(
24151 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24152 TensorOptions options, DimnameList names
24153 ) override {
24154 outputs_[output_idx] = create_out(sizes, strides, options);
24155 if (!names.empty()) {
24156 namedinference::propagate_names(*outputs_[output_idx], names);
24157 }
24158 // super must happen after, so that downstream can use maybe_get_output
24159 // to retrieve the output
24160 }
24161 void set_output_raw_strided(
24162 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24163 TensorOptions options, DimnameList names
24164 ) override {
24165 outputs_[output_idx] = create_out(sizes, strides, options);
24166 if (!names.empty()) {
24167 namedinference::propagate_names(*outputs_[output_idx], names);
24168 }
24169 // super must happen after, so that downstream can use maybe_get_output
24170 // to retrieve the output
24171 }
24172 const Tensor& maybe_get_output(int64_t output_idx) override {
24173 return *outputs_[output_idx];
24174 }
24175 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
24176};
24177at::Tensor wrapper_CPU_reflection_pad3d(const at::Tensor & self, at::IntArrayRef padding) {
24178structured_reflection_pad3d_out_cpu_functional op;
24179op.meta(self, padding);
24180op.impl(self, padding, *op.outputs_[0]);
24181return std::move(op.outputs_[0]).take();
24182}
24183struct structured_reflection_pad3d_out_cpu_out final : public at::native::structured_reflection_pad3d_out_cpu {
24184 structured_reflection_pad3d_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
24185 void set_output_strided(
24186 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24187 TensorOptions options, DimnameList names
24188 ) override {
24189 const auto& out = outputs_[output_idx].get();
24190 resize_out(out, sizes, strides, options);
24191 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
24192 if (C10_UNLIKELY(maybe_proxy.has_value())) {
24193 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
24194 }
24195 if (!names.empty()) {
24196 namedinference::propagate_names(outputs_[output_idx], names);
24197 }
24198 // super must happen after, so that downstream can use maybe_get_output
24199 // to retrieve the output
24200 }
24201 void set_output_raw_strided(
24202 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24203 TensorOptions options, DimnameList names
24204 ) override {
24205 const auto& out = outputs_[output_idx].get();
24206 resize_out(out, sizes, strides, options);
24207 if (!names.empty()) {
24208 namedinference::propagate_names(outputs_[output_idx], names);
24209 }
24210 // super must happen after, so that downstream can use maybe_get_output
24211 // to retrieve the output
24212 }
24213 const Tensor& maybe_get_output(int64_t output_idx) override {
24214 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
24215 }
24216 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
24217 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
24218};
24219at::Tensor & wrapper_CPU_reflection_pad3d_out_out(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) {
24220structured_reflection_pad3d_out_cpu_out op(out);
24221op.meta(self, padding);
24222op.impl(self, padding, op.maybe_get_output(0));
24223if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
24224return out;
24225}
24226struct structured_reflection_pad3d_backward_out_cpu_functional final : public at::native::structured_reflection_pad3d_backward_out_cpu {
24227 void set_output_strided(
24228 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24229 TensorOptions options, DimnameList names
24230 ) override {
24231 outputs_[output_idx] = create_out(sizes, strides, options);
24232 if (!names.empty()) {
24233 namedinference::propagate_names(*outputs_[output_idx], names);
24234 }
24235 // super must happen after, so that downstream can use maybe_get_output
24236 // to retrieve the output
24237 }
24238 void set_output_raw_strided(
24239 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24240 TensorOptions options, DimnameList names
24241 ) override {
24242 outputs_[output_idx] = create_out(sizes, strides, options);
24243 if (!names.empty()) {
24244 namedinference::propagate_names(*outputs_[output_idx], names);
24245 }
24246 // super must happen after, so that downstream can use maybe_get_output
24247 // to retrieve the output
24248 }
24249 const Tensor& maybe_get_output(int64_t output_idx) override {
24250 return *outputs_[output_idx];
24251 }
24252 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
24253};
24254at::Tensor wrapper_CPU_reflection_pad3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
24255structured_reflection_pad3d_backward_out_cpu_functional op;
24256op.meta(grad_output, self, padding);
24257op.impl(grad_output, self, padding, *op.outputs_[0]);
24258return std::move(op.outputs_[0]).take();
24259}
24260struct structured_reflection_pad3d_backward_out_cpu_out final : public at::native::structured_reflection_pad3d_backward_out_cpu {
24261 structured_reflection_pad3d_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
24262 void set_output_strided(
24263 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24264 TensorOptions options, DimnameList names
24265 ) override {
24266 const auto& out = outputs_[output_idx].get();
24267 resize_out(out, sizes, strides, options);
24268 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
24269 if (C10_UNLIKELY(maybe_proxy.has_value())) {
24270 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
24271 }
24272 if (!names.empty()) {
24273 namedinference::propagate_names(outputs_[output_idx], names);
24274 }
24275 // super must happen after, so that downstream can use maybe_get_output
24276 // to retrieve the output
24277 }
24278 void set_output_raw_strided(
24279 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24280 TensorOptions options, DimnameList names
24281 ) override {
24282 const auto& out = outputs_[output_idx].get();
24283 resize_out(out, sizes, strides, options);
24284 if (!names.empty()) {
24285 namedinference::propagate_names(outputs_[output_idx], names);
24286 }
24287 // super must happen after, so that downstream can use maybe_get_output
24288 // to retrieve the output
24289 }
24290 const Tensor& maybe_get_output(int64_t output_idx) override {
24291 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
24292 }
24293 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
24294 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
24295};
24296at::Tensor & wrapper_CPU_reflection_pad3d_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
24297structured_reflection_pad3d_backward_out_cpu_out op(grad_input);
24298op.meta(grad_output, self, padding);
24299op.impl(grad_output, self, padding, op.maybe_get_output(0));
24300if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
24301return grad_input;
24302}
24303struct structured_replication_pad1d_out_cpu_functional final : public at::native::structured_replication_pad1d_out_cpu {
24304 void set_output_strided(
24305 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24306 TensorOptions options, DimnameList names
24307 ) override {
24308 outputs_[output_idx] = create_out(sizes, strides, options);
24309 if (!names.empty()) {
24310 namedinference::propagate_names(*outputs_[output_idx], names);
24311 }
24312 // super must happen after, so that downstream can use maybe_get_output
24313 // to retrieve the output
24314 }
24315 void set_output_raw_strided(
24316 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24317 TensorOptions options, DimnameList names
24318 ) override {
24319 outputs_[output_idx] = create_out(sizes, strides, options);
24320 if (!names.empty()) {
24321 namedinference::propagate_names(*outputs_[output_idx], names);
24322 }
24323 // super must happen after, so that downstream can use maybe_get_output
24324 // to retrieve the output
24325 }
24326 const Tensor& maybe_get_output(int64_t output_idx) override {
24327 return *outputs_[output_idx];
24328 }
24329 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
24330};
24331at::Tensor wrapper_CPU_replication_pad1d(const at::Tensor & self, at::IntArrayRef padding) {
24332structured_replication_pad1d_out_cpu_functional op;
24333op.meta(self, padding);
24334op.impl(self, padding, *op.outputs_[0]);
24335return std::move(op.outputs_[0]).take();
24336}
24337struct structured_replication_pad1d_out_cpu_out final : public at::native::structured_replication_pad1d_out_cpu {
24338 structured_replication_pad1d_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
24339 void set_output_strided(
24340 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24341 TensorOptions options, DimnameList names
24342 ) override {
24343 const auto& out = outputs_[output_idx].get();
24344 resize_out(out, sizes, strides, options);
24345 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
24346 if (C10_UNLIKELY(maybe_proxy.has_value())) {
24347 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
24348 }
24349 if (!names.empty()) {
24350 namedinference::propagate_names(outputs_[output_idx], names);
24351 }
24352 // super must happen after, so that downstream can use maybe_get_output
24353 // to retrieve the output
24354 }
24355 void set_output_raw_strided(
24356 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24357 TensorOptions options, DimnameList names
24358 ) override {
24359 const auto& out = outputs_[output_idx].get();
24360 resize_out(out, sizes, strides, options);
24361 if (!names.empty()) {
24362 namedinference::propagate_names(outputs_[output_idx], names);
24363 }
24364 // super must happen after, so that downstream can use maybe_get_output
24365 // to retrieve the output
24366 }
24367 const Tensor& maybe_get_output(int64_t output_idx) override {
24368 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
24369 }
24370 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
24371 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
24372};
24373at::Tensor & wrapper_CPU_replication_pad1d_out_out(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) {
24374structured_replication_pad1d_out_cpu_out op(out);
24375op.meta(self, padding);
24376op.impl(self, padding, op.maybe_get_output(0));
24377if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
24378return out;
24379}
24380struct structured_replication_pad1d_backward_out_cpu_functional final : public at::native::structured_replication_pad1d_backward_out_cpu {
24381 void set_output_strided(
24382 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24383 TensorOptions options, DimnameList names
24384 ) override {
24385 outputs_[output_idx] = create_out(sizes, strides, options);
24386 if (!names.empty()) {
24387 namedinference::propagate_names(*outputs_[output_idx], names);
24388 }
24389 // super must happen after, so that downstream can use maybe_get_output
24390 // to retrieve the output
24391 }
24392 void set_output_raw_strided(
24393 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24394 TensorOptions options, DimnameList names
24395 ) override {
24396 outputs_[output_idx] = create_out(sizes, strides, options);
24397 if (!names.empty()) {
24398 namedinference::propagate_names(*outputs_[output_idx], names);
24399 }
24400 // super must happen after, so that downstream can use maybe_get_output
24401 // to retrieve the output
24402 }
24403 const Tensor& maybe_get_output(int64_t output_idx) override {
24404 return *outputs_[output_idx];
24405 }
24406 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
24407};
24408at::Tensor wrapper_CPU_replication_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
24409structured_replication_pad1d_backward_out_cpu_functional op;
24410op.meta(grad_output, self, padding);
24411op.impl(grad_output, self, padding, *op.outputs_[0]);
24412return std::move(op.outputs_[0]).take();
24413}
24414struct structured_replication_pad1d_backward_out_cpu_out final : public at::native::structured_replication_pad1d_backward_out_cpu {
24415 structured_replication_pad1d_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
24416 void set_output_strided(
24417 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24418 TensorOptions options, DimnameList names
24419 ) override {
24420 const auto& out = outputs_[output_idx].get();
24421 resize_out(out, sizes, strides, options);
24422 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
24423 if (C10_UNLIKELY(maybe_proxy.has_value())) {
24424 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
24425 }
24426 if (!names.empty()) {
24427 namedinference::propagate_names(outputs_[output_idx], names);
24428 }
24429 // super must happen after, so that downstream can use maybe_get_output
24430 // to retrieve the output
24431 }
24432 void set_output_raw_strided(
24433 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24434 TensorOptions options, DimnameList names
24435 ) override {
24436 const auto& out = outputs_[output_idx].get();
24437 resize_out(out, sizes, strides, options);
24438 if (!names.empty()) {
24439 namedinference::propagate_names(outputs_[output_idx], names);
24440 }
24441 // super must happen after, so that downstream can use maybe_get_output
24442 // to retrieve the output
24443 }
24444 const Tensor& maybe_get_output(int64_t output_idx) override {
24445 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
24446 }
24447 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
24448 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
24449};
24450at::Tensor & wrapper_CPU_replication_pad1d_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
24451structured_replication_pad1d_backward_out_cpu_out op(grad_input);
24452op.meta(grad_output, self, padding);
24453op.impl(grad_output, self, padding, op.maybe_get_output(0));
24454if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
24455return grad_input;
24456}
24457struct structured_replication_pad2d_out_cpu_functional final : public at::native::structured_replication_pad2d_out_cpu {
24458 void set_output_strided(
24459 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24460 TensorOptions options, DimnameList names
24461 ) override {
24462 outputs_[output_idx] = create_out(sizes, strides, options);
24463 if (!names.empty()) {
24464 namedinference::propagate_names(*outputs_[output_idx], names);
24465 }
24466 // super must happen after, so that downstream can use maybe_get_output
24467 // to retrieve the output
24468 }
24469 void set_output_raw_strided(
24470 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24471 TensorOptions options, DimnameList names
24472 ) override {
24473 outputs_[output_idx] = create_out(sizes, strides, options);
24474 if (!names.empty()) {
24475 namedinference::propagate_names(*outputs_[output_idx], names);
24476 }
24477 // super must happen after, so that downstream can use maybe_get_output
24478 // to retrieve the output
24479 }
24480 const Tensor& maybe_get_output(int64_t output_idx) override {
24481 return *outputs_[output_idx];
24482 }
24483 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
24484};
24485at::Tensor wrapper_CPU_replication_pad2d(const at::Tensor & self, at::IntArrayRef padding) {
24486structured_replication_pad2d_out_cpu_functional op;
24487op.meta(self, padding);
24488op.impl(self, padding, *op.outputs_[0]);
24489return std::move(op.outputs_[0]).take();
24490}
24491struct structured_replication_pad2d_out_cpu_out final : public at::native::structured_replication_pad2d_out_cpu {
24492 structured_replication_pad2d_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
24493 void set_output_strided(
24494 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24495 TensorOptions options, DimnameList names
24496 ) override {
24497 const auto& out = outputs_[output_idx].get();
24498 resize_out(out, sizes, strides, options);
24499 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
24500 if (C10_UNLIKELY(maybe_proxy.has_value())) {
24501 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
24502 }
24503 if (!names.empty()) {
24504 namedinference::propagate_names(outputs_[output_idx], names);
24505 }
24506 // super must happen after, so that downstream can use maybe_get_output
24507 // to retrieve the output
24508 }
24509 void set_output_raw_strided(
24510 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24511 TensorOptions options, DimnameList names
24512 ) override {
24513 const auto& out = outputs_[output_idx].get();
24514 resize_out(out, sizes, strides, options);
24515 if (!names.empty()) {
24516 namedinference::propagate_names(outputs_[output_idx], names);
24517 }
24518 // super must happen after, so that downstream can use maybe_get_output
24519 // to retrieve the output
24520 }
24521 const Tensor& maybe_get_output(int64_t output_idx) override {
24522 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
24523 }
24524 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
24525 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
24526};
24527at::Tensor & wrapper_CPU_replication_pad2d_out_out(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) {
24528structured_replication_pad2d_out_cpu_out op(out);
24529op.meta(self, padding);
24530op.impl(self, padding, op.maybe_get_output(0));
24531if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
24532return out;
24533}
24534namespace {
24535at::Tensor wrapper_CPU__replication_pad2d_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
24536 // No device check
24537 // DeviceGuard omitted
24538 return at::native::replication_pad2d_backward_cpu(grad_output, self, C10_AS_INTARRAYREF_SLOW(padding));
24539}
24540} // anonymous namespace
24541namespace {
24542at::Tensor & wrapper_CPU_grad_input_replication_pad2d_backward_out(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
24543 // No device check
24544 // DeviceGuard omitted
24545 return at::native::replication_pad2d_backward_out_cpu(grad_output, self, C10_AS_INTARRAYREF_SLOW(padding), grad_input);
24546}
24547} // anonymous namespace
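// Unlike the forward op above, replication_pad2d_backward is not routed through a
// structured meta()/impl() pair here: its wrappers forward straight to the
// at::native CPU kernels, materializing the SymInt padding into a concrete
// IntArrayRef via C10_AS_INTARRAYREF_SLOW.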
24548struct structured_replication_pad3d_out_cpu_functional final : public at::native::structured_replication_pad3d_out_cpu {
24549 void set_output_strided(
24550 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24551 TensorOptions options, DimnameList names
24552 ) override {
24553 outputs_[output_idx] = create_out(sizes, strides, options);
24554 if (!names.empty()) {
24555 namedinference::propagate_names(*outputs_[output_idx], names);
24556 }
24557 // super must happen after, so that downstream can use maybe_get_output
24558 // to retrieve the output
24559 }
24560 void set_output_raw_strided(
24561 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24562 TensorOptions options, DimnameList names
24563 ) override {
24564 outputs_[output_idx] = create_out(sizes, strides, options);
24565 if (!names.empty()) {
24566 namedinference::propagate_names(*outputs_[output_idx], names);
24567 }
24568 // super must happen after, so that downstream can use maybe_get_output
24569 // to retrieve the output
24570 }
24571 const Tensor& maybe_get_output(int64_t output_idx) override {
24572 return *outputs_[output_idx];
24573 }
24574 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
24575};
24576at::Tensor wrapper_CPU_replication_pad3d(const at::Tensor & self, at::IntArrayRef padding) {
24577structured_replication_pad3d_out_cpu_functional op;
24578op.meta(self, padding);
24579op.impl(self, padding, *op.outputs_[0]);
24580return std::move(op.outputs_[0]).take();
24581}
24582struct structured_replication_pad3d_out_cpu_out final : public at::native::structured_replication_pad3d_out_cpu {
24583 structured_replication_pad3d_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
24584 void set_output_strided(
24585 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24586 TensorOptions options, DimnameList names
24587 ) override {
24588 const auto& out = outputs_[output_idx].get();
24589 resize_out(out, sizes, strides, options);
24590 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
24591 if (C10_UNLIKELY(maybe_proxy.has_value())) {
24592 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
24593 }
24594 if (!names.empty()) {
24595 namedinference::propagate_names(outputs_[output_idx], names);
24596 }
24597 // super must happen after, so that downstream can use maybe_get_output
24598 // to retrieve the output
24599 }
24600 void set_output_raw_strided(
24601 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24602 TensorOptions options, DimnameList names
24603 ) override {
24604 const auto& out = outputs_[output_idx].get();
24605 resize_out(out, sizes, strides, options);
24606 if (!names.empty()) {
24607 namedinference::propagate_names(outputs_[output_idx], names);
24608 }
24609 // super must happen after, so that downstream can use maybe_get_output
24610 // to retrieve the output
24611 }
24612 const Tensor& maybe_get_output(int64_t output_idx) override {
24613 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
24614 }
24615 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
24616 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
24617};
24618at::Tensor & wrapper_CPU_replication_pad3d_out_out(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) {
24619structured_replication_pad3d_out_cpu_out op(out);
24620op.meta(self, padding);
24621op.impl(self, padding, op.maybe_get_output(0));
24622if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
24623return out;
24624}
24625namespace {
24626at::Tensor wrapper_CPU__replication_pad3d_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
24627 // No device check
24628 // DeviceGuard omitted
24629 return at::native::replication_pad3d_backward_cpu(grad_output, self, C10_AS_INTARRAYREF_SLOW(padding));
24630}
24631} // anonymous namespace
24632namespace {
24633at::Tensor & wrapper_CPU_grad_input_replication_pad3d_backward_out(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
24634 // No device check
24635 // DeviceGuard omitted
24636 return at::native::replication_pad3d_backward_out_cpu(grad_output, self, C10_AS_INTARRAYREF_SLOW(padding), grad_input);
24637}
24638} // anonymous namespace
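// The upsample_* wrappers below repeat the same structured pattern. Each forward
// op takes the requested output_size, an align_corners flag (for the linear,
// bilinear, bicubic and trilinear variants), and optional per-dimension scale
// factors (scales, or scales_h/scales_w/scales_d), all passed through unchanged
// to meta() and impl().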
24639struct structured_upsample_linear1d_out_cpu_functional final : public at::native::structured_upsample_linear1d_out_cpu {
24640 void set_output_strided(
24641 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24642 TensorOptions options, DimnameList names
24643 ) override {
24644 outputs_[output_idx] = create_out(sizes, strides, options);
24645 if (!names.empty()) {
24646 namedinference::propagate_names(*outputs_[output_idx], names);
24647 }
24648 // super must happen after, so that downstream can use maybe_get_output
24649 // to retrieve the output
24650 }
24651 void set_output_raw_strided(
24652 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24653 TensorOptions options, DimnameList names
24654 ) override {
24655 outputs_[output_idx] = create_out(sizes, strides, options);
24656 if (!names.empty()) {
24657 namedinference::propagate_names(*outputs_[output_idx], names);
24658 }
24659 // super must happen after, so that downstream can use maybe_get_output
24660 // to retrieve the output
24661 }
24662 const Tensor& maybe_get_output(int64_t output_idx) override {
24663 return *outputs_[output_idx];
24664 }
24665 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
24666};
24667at::Tensor wrapper_CPU_upsample_linear1d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales) {
24668structured_upsample_linear1d_out_cpu_functional op;
24669op.meta(self, output_size, align_corners, scales);
24670op.impl(self, output_size, align_corners, scales, *op.outputs_[0]);
24671return std::move(op.outputs_[0]).take();
24672}
24673struct structured_upsample_linear1d_out_cpu_out final : public at::native::structured_upsample_linear1d_out_cpu {
24674 structured_upsample_linear1d_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
24675 void set_output_strided(
24676 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24677 TensorOptions options, DimnameList names
24678 ) override {
24679 const auto& out = outputs_[output_idx].get();
24680 resize_out(out, sizes, strides, options);
24681 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
24682 if (C10_UNLIKELY(maybe_proxy.has_value())) {
24683 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
24684 }
24685 if (!names.empty()) {
24686 namedinference::propagate_names(outputs_[output_idx], names);
24687 }
24688 // super must happen after, so that downstream can use maybe_get_output
24689 // to retrieve the output
24690 }
24691 void set_output_raw_strided(
24692 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24693 TensorOptions options, DimnameList names
24694 ) override {
24695 const auto& out = outputs_[output_idx].get();
24696 resize_out(out, sizes, strides, options);
24697 if (!names.empty()) {
24698 namedinference::propagate_names(outputs_[output_idx], names);
24699 }
24700 // super must happen after, so that downstream can use maybe_get_output
24701 // to retrieve the output
24702 }
24703 const Tensor& maybe_get_output(int64_t output_idx) override {
24704 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
24705 }
24706 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
24707 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
24708};
24709at::Tensor & wrapper_CPU_upsample_linear1d_out_out(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales, at::Tensor & out) {
24710structured_upsample_linear1d_out_cpu_out op(out);
24711op.meta(self, output_size, align_corners, scales);
24712op.impl(self, output_size, align_corners, scales, op.maybe_get_output(0));
24713if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
24714return out;
24715}
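// The *_backward wrappers additionally take input_size: grad_input is sized to
// the shape of the original input rather than to grad_output, so that shape has
// to be passed in explicitly alongside output_size.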
24716struct structured_upsample_linear1d_backward_out_cpu_functional final : public at::native::structured_upsample_linear1d_backward_out_cpu {
24717 void set_output_strided(
24718 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24719 TensorOptions options, DimnameList names
24720 ) override {
24721 outputs_[output_idx] = create_out(sizes, strides, options);
24722 if (!names.empty()) {
24723 namedinference::propagate_names(*outputs_[output_idx], names);
24724 }
24725 // super must happen after, so that downstream can use maybe_get_output
24726 // to retrieve the output
24727 }
24728 void set_output_raw_strided(
24729 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24730 TensorOptions options, DimnameList names
24731 ) override {
24732 outputs_[output_idx] = create_out(sizes, strides, options);
24733 if (!names.empty()) {
24734 namedinference::propagate_names(*outputs_[output_idx], names);
24735 }
24736 // super must happen after, so that downstream can use maybe_get_output
24737 // to retrieve the output
24738 }
24739 const Tensor& maybe_get_output(int64_t output_idx) override {
24740 return *outputs_[output_idx];
24741 }
24742 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
24743};
24744at::Tensor wrapper_CPU_upsample_linear1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales) {
24745structured_upsample_linear1d_backward_out_cpu_functional op;
24746op.meta(grad_output, output_size, input_size, align_corners, scales);
24747op.impl(grad_output, output_size, input_size, align_corners, scales, *op.outputs_[0]);
24748return std::move(op.outputs_[0]).take();
24749}
24750struct structured_upsample_linear1d_backward_out_cpu_out final : public at::native::structured_upsample_linear1d_backward_out_cpu {
24751 structured_upsample_linear1d_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
24752 void set_output_strided(
24753 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24754 TensorOptions options, DimnameList names
24755 ) override {
24756 const auto& out = outputs_[output_idx].get();
24757 resize_out(out, sizes, strides, options);
24758 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
24759 if (C10_UNLIKELY(maybe_proxy.has_value())) {
24760 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
24761 }
24762 if (!names.empty()) {
24763 namedinference::propagate_names(outputs_[output_idx], names);
24764 }
24765 // super must happen after, so that downstream can use maybe_get_output
24766 // to retrieve the output
24767 }
24768 void set_output_raw_strided(
24769 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24770 TensorOptions options, DimnameList names
24771 ) override {
24772 const auto& out = outputs_[output_idx].get();
24773 resize_out(out, sizes, strides, options);
24774 if (!names.empty()) {
24775 namedinference::propagate_names(outputs_[output_idx], names);
24776 }
24777 // super must happen after, so that downstream can use maybe_get_output
24778 // to retrieve the output
24779 }
24780 const Tensor& maybe_get_output(int64_t output_idx) override {
24781 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
24782 }
24783 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
24784 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
24785};
24786at::Tensor & wrapper_CPU_upsample_linear1d_backward_out_grad_input(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales, at::Tensor & grad_input) {
24787structured_upsample_linear1d_backward_out_cpu_out op(grad_input);
24788op.meta(grad_output, output_size, input_size, align_corners, scales);
24789op.impl(grad_output, output_size, input_size, align_corners, scales, op.maybe_get_output(0));
24790if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
24791return grad_input;
24792}
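// Note: these wrapper functions are only entry points; they are presumably hooked
// up to the dispatcher by the TORCH_LIBRARY_IMPL registration emitted later in
// this generated file, after which a call such as at::upsample_linear1d on a CPU
// tensor lands in wrapper_CPU_upsample_linear1d.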
24793struct structured_upsample_bilinear2d_out_cpu_functional final : public at::native::structured_upsample_bilinear2d_out_cpu {
24794 void set_output_strided(
24795 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24796 TensorOptions options, DimnameList names
24797 ) override {
24798 outputs_[output_idx] = create_out(sizes, strides, options);
24799 if (!names.empty()) {
24800 namedinference::propagate_names(*outputs_[output_idx], names);
24801 }
24802 // super must happen after, so that downstream can use maybe_get_output
24803 // to retrieve the output
24804 }
24805 void set_output_raw_strided(
24806 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24807 TensorOptions options, DimnameList names
24808 ) override {
24809 outputs_[output_idx] = create_out(sizes, strides, options);
24810 if (!names.empty()) {
24811 namedinference::propagate_names(*outputs_[output_idx], names);
24812 }
24813 // super must happen after, so that downstream can use maybe_get_output
24814 // to retrieve the output
24815 }
24816 const Tensor& maybe_get_output(int64_t output_idx) override {
24817 return *outputs_[output_idx];
24818 }
24819 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
24820};
24821at::Tensor wrapper_CPU_upsample_bilinear2d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
24822structured_upsample_bilinear2d_out_cpu_functional op;
24823op.meta(self, output_size, align_corners, scales_h, scales_w);
24824op.impl(self, output_size, align_corners, scales_h, scales_w, *op.outputs_[0]);
24825return std::move(op.outputs_[0]).take();
24826}
24827struct structured_upsample_bilinear2d_out_cpu_out final : public at::native::structured_upsample_bilinear2d_out_cpu {
24828 structured_upsample_bilinear2d_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
24829 void set_output_strided(
24830 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24831 TensorOptions options, DimnameList names
24832 ) override {
24833 const auto& out = outputs_[output_idx].get();
24834 resize_out(out, sizes, strides, options);
24835 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
24836 if (C10_UNLIKELY(maybe_proxy.has_value())) {
24837 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
24838 }
24839 if (!names.empty()) {
24840 namedinference::propagate_names(outputs_[output_idx], names);
24841 }
24842 // super must happen after, so that downstream can use maybe_get_output
24843 // to retrieve the output
24844 }
24845 void set_output_raw_strided(
24846 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24847 TensorOptions options, DimnameList names
24848 ) override {
24849 const auto& out = outputs_[output_idx].get();
24850 resize_out(out, sizes, strides, options);
24851 if (!names.empty()) {
24852 namedinference::propagate_names(outputs_[output_idx], names);
24853 }
24854 // super must happen after, so that downstream can use maybe_get_output
24855 // to retrieve the output
24856 }
24857 const Tensor& maybe_get_output(int64_t output_idx) override {
24858 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
24859 }
24860 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
24861 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
24862};
24863at::Tensor & wrapper_CPU_upsample_bilinear2d_out_out(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
24864structured_upsample_bilinear2d_out_cpu_out op(out);
24865op.meta(self, output_size, align_corners, scales_h, scales_w);
24866op.impl(self, output_size, align_corners, scales_h, scales_w, op.maybe_get_output(0));
24867if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
24868return out;
24869}
24870struct structured_upsample_bilinear2d_backward_out_cpu_functional final : public at::native::structured_upsample_bilinear2d_backward_out_cpu {
24871 void set_output_strided(
24872 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24873 TensorOptions options, DimnameList names
24874 ) override {
24875 outputs_[output_idx] = create_out(sizes, strides, options);
24876 if (!names.empty()) {
24877 namedinference::propagate_names(*outputs_[output_idx], names);
24878 }
24879 // super must happen after, so that downstream can use maybe_get_output
24880 // to retrieve the output
24881 }
24882 void set_output_raw_strided(
24883 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24884 TensorOptions options, DimnameList names
24885 ) override {
24886 outputs_[output_idx] = create_out(sizes, strides, options);
24887 if (!names.empty()) {
24888 namedinference::propagate_names(*outputs_[output_idx], names);
24889 }
24890 // super must happen after, so that downstream can use maybe_get_output
24891 // to retrieve the output
24892 }
24893 const Tensor& maybe_get_output(int64_t output_idx) override {
24894 return *outputs_[output_idx];
24895 }
24896 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
24897};
24898at::Tensor wrapper_CPU_upsample_bilinear2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
24899structured_upsample_bilinear2d_backward_out_cpu_functional op;
24900op.meta(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
24901op.impl(grad_output, output_size, input_size, align_corners, scales_h, scales_w, *op.outputs_[0]);
24902return std::move(op.outputs_[0]).take();
24903}
24904struct structured_upsample_bilinear2d_backward_out_cpu_out final : public at::native::structured_upsample_bilinear2d_backward_out_cpu {
24905 structured_upsample_bilinear2d_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
24906 void set_output_strided(
24907 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24908 TensorOptions options, DimnameList names
24909 ) override {
24910 const auto& out = outputs_[output_idx].get();
24911 resize_out(out, sizes, strides, options);
24912 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
24913 if (C10_UNLIKELY(maybe_proxy.has_value())) {
24914 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
24915 }
24916 if (!names.empty()) {
24917 namedinference::propagate_names(outputs_[output_idx], names);
24918 }
24919 // super must happen after, so that downstream can use maybe_get_output
24920 // to retrieve the output
24921 }
24922 void set_output_raw_strided(
24923 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24924 TensorOptions options, DimnameList names
24925 ) override {
24926 const auto& out = outputs_[output_idx].get();
24927 resize_out(out, sizes, strides, options);
24928 if (!names.empty()) {
24929 namedinference::propagate_names(outputs_[output_idx], names);
24930 }
24931 // super must happen after, so that downstream can use maybe_get_output
24932 // to retrieve the output
24933 }
24934 const Tensor& maybe_get_output(int64_t output_idx) override {
24935 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
24936 }
24937 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
24938 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
24939};
24940at::Tensor & wrapper_CPU_upsample_bilinear2d_backward_out_grad_input(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
24941structured_upsample_bilinear2d_backward_out_cpu_out op(grad_input);
24942op.meta(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
24943op.impl(grad_output, output_size, input_size, align_corners, scales_h, scales_w, op.maybe_get_output(0));
24944if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
24945return grad_input;
24946}
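// The _upsample_*_aa ops that follow are the anti-aliased interpolation variants
// (the leading underscore marks them as internal; they are typically reached via
// torch.nn.functional.interpolate with antialias=True). Their wrappers are
// otherwise identical to the non-aa ones above.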
24947struct structured__upsample_bilinear2d_aa_out_cpu_functional final : public at::native::structured__upsample_bilinear2d_aa_out_cpu {
24948 void set_output_strided(
24949 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24950 TensorOptions options, DimnameList names
24951 ) override {
24952 outputs_[output_idx] = create_out(sizes, strides, options);
24953 if (!names.empty()) {
24954 namedinference::propagate_names(*outputs_[output_idx], names);
24955 }
24956 // super must happen after, so that downstream can use maybe_get_output
24957 // to retrieve the output
24958 }
24959 void set_output_raw_strided(
24960 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24961 TensorOptions options, DimnameList names
24962 ) override {
24963 outputs_[output_idx] = create_out(sizes, strides, options);
24964 if (!names.empty()) {
24965 namedinference::propagate_names(*outputs_[output_idx], names);
24966 }
24967 // super must happen after, so that downstream can use maybe_get_output
24968 // to retrieve the output
24969 }
24970 const Tensor& maybe_get_output(int64_t output_idx) override {
24971 return *outputs_[output_idx];
24972 }
24973 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
24974};
24975at::Tensor wrapper_CPU__upsample_bilinear2d_aa(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
24976structured__upsample_bilinear2d_aa_out_cpu_functional op;
24977op.meta(self, output_size, align_corners, scales_h, scales_w);
24978op.impl(self, output_size, align_corners, scales_h, scales_w, *op.outputs_[0]);
24979return std::move(op.outputs_[0]).take();
24980}
24981struct structured__upsample_bilinear2d_aa_out_cpu_out final : public at::native::structured__upsample_bilinear2d_aa_out_cpu {
24982 structured__upsample_bilinear2d_aa_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
24983 void set_output_strided(
24984 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
24985 TensorOptions options, DimnameList names
24986 ) override {
24987 const auto& out = outputs_[output_idx].get();
24988 resize_out(out, sizes, strides, options);
24989 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
24990 if (C10_UNLIKELY(maybe_proxy.has_value())) {
24991 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
24992 }
24993 if (!names.empty()) {
24994 namedinference::propagate_names(outputs_[output_idx], names);
24995 }
24996 // super must happen after, so that downstream can use maybe_get_output
24997 // to retrieve the output
24998 }
24999 void set_output_raw_strided(
25000 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25001 TensorOptions options, DimnameList names
25002 ) override {
25003 const auto& out = outputs_[output_idx].get();
25004 resize_out(out, sizes, strides, options);
25005 if (!names.empty()) {
25006 namedinference::propagate_names(outputs_[output_idx], names);
25007 }
25008 // super must happen after, so that downstream can use maybe_get_output
25009 // to retrieve the output
25010 }
25011 const Tensor& maybe_get_output(int64_t output_idx) override {
25012 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
25013 }
25014 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
25015 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
25016};
25017at::Tensor & wrapper_CPU__upsample_bilinear2d_aa_out_out(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
25018structured__upsample_bilinear2d_aa_out_cpu_out op(out);
25019op.meta(self, output_size, align_corners, scales_h, scales_w);
25020op.impl(self, output_size, align_corners, scales_h, scales_w, op.maybe_get_output(0));
25021if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
25022return out;
25023}
25024struct structured__upsample_bilinear2d_aa_backward_out_cpu_functional final : public at::native::structured__upsample_bilinear2d_aa_backward_out_cpu {
25025 void set_output_strided(
25026 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25027 TensorOptions options, DimnameList names
25028 ) override {
25029 outputs_[output_idx] = create_out(sizes, strides, options);
25030 if (!names.empty()) {
25031 namedinference::propagate_names(*outputs_[output_idx], names);
25032 }
25033 // super must happen after, so that downstream can use maybe_get_output
25034 // to retrieve the output
25035 }
25036 void set_output_raw_strided(
25037 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25038 TensorOptions options, DimnameList names
25039 ) override {
25040 outputs_[output_idx] = create_out(sizes, strides, options);
25041 if (!names.empty()) {
25042 namedinference::propagate_names(*outputs_[output_idx], names);
25043 }
25044 // super must happen after, so that downstream can use maybe_get_output
25045 // to retrieve the output
25046 }
25047 const Tensor& maybe_get_output(int64_t output_idx) override {
25048 return *outputs_[output_idx];
25049 }
25050 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
25051};
25052at::Tensor wrapper_CPU__upsample_bilinear2d_aa_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
25053structured__upsample_bilinear2d_aa_backward_out_cpu_functional op;
25054op.meta(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
25055op.impl(grad_output, output_size, input_size, align_corners, scales_h, scales_w, *op.outputs_[0]);
25056return std::move(op.outputs_[0]).take();
25057}
25058struct structured__upsample_bilinear2d_aa_backward_out_cpu_out final : public at::native::structured__upsample_bilinear2d_aa_backward_out_cpu {
25059 structured__upsample_bilinear2d_aa_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
25060 void set_output_strided(
25061 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25062 TensorOptions options, DimnameList names
25063 ) override {
25064 const auto& out = outputs_[output_idx].get();
25065 resize_out(out, sizes, strides, options);
25066 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
25067 if (C10_UNLIKELY(maybe_proxy.has_value())) {
25068 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
25069 }
25070 if (!names.empty()) {
25071 namedinference::propagate_names(outputs_[output_idx], names);
25072 }
25073 // super must happen after, so that downstream can use maybe_get_output
25074 // to retrieve the output
25075 }
25076 void set_output_raw_strided(
25077 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25078 TensorOptions options, DimnameList names
25079 ) override {
25080 const auto& out = outputs_[output_idx].get();
25081 resize_out(out, sizes, strides, options);
25082 if (!names.empty()) {
25083 namedinference::propagate_names(outputs_[output_idx], names);
25084 }
25085 // super must happen after, so that downstream can use maybe_get_output
25086 // to retrieve the output
25087 }
25088 const Tensor& maybe_get_output(int64_t output_idx) override {
25089 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
25090 }
25091 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
25092 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
25093};
25094at::Tensor & wrapper_CPU__upsample_bilinear2d_aa_backward_out_grad_input(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
25095structured__upsample_bilinear2d_aa_backward_out_cpu_out op(grad_input);
25096op.meta(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
25097op.impl(grad_output, output_size, input_size, align_corners, scales_h, scales_w, op.maybe_get_output(0));
25098if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
25099return grad_input;
25100}
25101struct structured_upsample_bicubic2d_out_cpu_functional final : public at::native::structured_upsample_bicubic2d_out_cpu {
25102 void set_output_strided(
25103 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25104 TensorOptions options, DimnameList names
25105 ) override {
25106 outputs_[output_idx] = create_out(sizes, strides, options);
25107 if (!names.empty()) {
25108 namedinference::propagate_names(*outputs_[output_idx], names);
25109 }
25110 // super must happen after, so that downstream can use maybe_get_output
25111 // to retrieve the output
25112 }
25113 void set_output_raw_strided(
25114 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25115 TensorOptions options, DimnameList names
25116 ) override {
25117 outputs_[output_idx] = create_out(sizes, strides, options);
25118 if (!names.empty()) {
25119 namedinference::propagate_names(*outputs_[output_idx], names);
25120 }
25121 // super must happen after, so that downstream can use maybe_get_output
25122 // to retrieve the output
25123 }
25124 const Tensor& maybe_get_output(int64_t output_idx) override {
25125 return *outputs_[output_idx];
25126 }
25127 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
25128};
25129at::Tensor wrapper_CPU_upsample_bicubic2d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
25130structured_upsample_bicubic2d_out_cpu_functional op;
25131op.meta(self, output_size, align_corners, scales_h, scales_w);
25132op.impl(self, output_size, align_corners, scales_h, scales_w, *op.outputs_[0]);
25133return std::move(op.outputs_[0]).take();
25134}
25135struct structured_upsample_bicubic2d_out_cpu_out final : public at::native::structured_upsample_bicubic2d_out_cpu {
25136 structured_upsample_bicubic2d_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
25137 void set_output_strided(
25138 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25139 TensorOptions options, DimnameList names
25140 ) override {
25141 const auto& out = outputs_[output_idx].get();
25142 resize_out(out, sizes, strides, options);
25143 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
25144 if (C10_UNLIKELY(maybe_proxy.has_value())) {
25145 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
25146 }
25147 if (!names.empty()) {
25148 namedinference::propagate_names(outputs_[output_idx], names);
25149 }
25150 // super must happen after, so that downstream can use maybe_get_output
25151 // to retrieve the output
25152 }
25153 void set_output_raw_strided(
25154 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25155 TensorOptions options, DimnameList names
25156 ) override {
25157 const auto& out = outputs_[output_idx].get();
25158 resize_out(out, sizes, strides, options);
25159 if (!names.empty()) {
25160 namedinference::propagate_names(outputs_[output_idx], names);
25161 }
25162 // super must happen after, so that downstream can use maybe_get_output
25163 // to retrieve the output
25164 }
25165 const Tensor& maybe_get_output(int64_t output_idx) override {
25166 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
25167 }
25168 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
25169 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
25170};
25171at::Tensor & wrapper_CPU_upsample_bicubic2d_out_out(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
25172structured_upsample_bicubic2d_out_cpu_out op(out);
25173op.meta(self, output_size, align_corners, scales_h, scales_w);
25174op.impl(self, output_size, align_corners, scales_h, scales_w, op.maybe_get_output(0));
25175if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
25176return out;
25177}
25178struct structured_upsample_bicubic2d_backward_out_cpu_functional final : public at::native::structured_upsample_bicubic2d_backward_out_cpu {
25179 void set_output_strided(
25180 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25181 TensorOptions options, DimnameList names
25182 ) override {
25183 outputs_[output_idx] = create_out(sizes, strides, options);
25184 if (!names.empty()) {
25185 namedinference::propagate_names(*outputs_[output_idx], names);
25186 }
25187 // super must happen after, so that downstream can use maybe_get_output
25188 // to retrieve the output
25189 }
25190 void set_output_raw_strided(
25191 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25192 TensorOptions options, DimnameList names
25193 ) override {
25194 outputs_[output_idx] = create_out(sizes, strides, options);
25195 if (!names.empty()) {
25196 namedinference::propagate_names(*outputs_[output_idx], names);
25197 }
25198 // super must happen after, so that downstream can use maybe_get_output
25199 // to retrieve the output
25200 }
25201 const Tensor& maybe_get_output(int64_t output_idx) override {
25202 return *outputs_[output_idx];
25203 }
25204 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
25205};
25206at::Tensor wrapper_CPU_upsample_bicubic2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
25207structured_upsample_bicubic2d_backward_out_cpu_functional op;
25208op.meta(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
25209op.impl(grad_output, output_size, input_size, align_corners, scales_h, scales_w, *op.outputs_[0]);
25210return std::move(op.outputs_[0]).take();
25211}
25212struct structured_upsample_bicubic2d_backward_out_cpu_out final : public at::native::structured_upsample_bicubic2d_backward_out_cpu {
25213 structured_upsample_bicubic2d_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
25214 void set_output_strided(
25215 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25216 TensorOptions options, DimnameList names
25217 ) override {
25218 const auto& out = outputs_[output_idx].get();
25219 resize_out(out, sizes, strides, options);
25220 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
25221 if (C10_UNLIKELY(maybe_proxy.has_value())) {
25222 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
25223 }
25224 if (!names.empty()) {
25225 namedinference::propagate_names(outputs_[output_idx], names);
25226 }
25227 // super must happen after, so that downstream can use maybe_get_output
25228 // to retrieve the output
25229 }
25230 void set_output_raw_strided(
25231 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25232 TensorOptions options, DimnameList names
25233 ) override {
25234 const auto& out = outputs_[output_idx].get();
25235 resize_out(out, sizes, strides, options);
25236 if (!names.empty()) {
25237 namedinference::propagate_names(outputs_[output_idx], names);
25238 }
25239 // super must happen after, so that downstream can use maybe_get_output
25240 // to retrieve the output
25241 }
25242 const Tensor& maybe_get_output(int64_t output_idx) override {
25243 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
25244 }
25245 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
25246 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
25247};
25248at::Tensor & wrapper_CPU_upsample_bicubic2d_backward_out_grad_input(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
25249structured_upsample_bicubic2d_backward_out_cpu_out op(grad_input);
25250op.meta(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
25251op.impl(grad_output, output_size, input_size, align_corners, scales_h, scales_w, op.maybe_get_output(0));
25252if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
25253return grad_input;
25254}
25255struct structured__upsample_bicubic2d_aa_out_cpu_functional final : public at::native::structured__upsample_bicubic2d_aa_out_cpu {
25256 void set_output_strided(
25257 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25258 TensorOptions options, DimnameList names
25259 ) override {
25260 outputs_[output_idx] = create_out(sizes, strides, options);
25261 if (!names.empty()) {
25262 namedinference::propagate_names(*outputs_[output_idx], names);
25263 }
25264 // super must happen after, so that downstream can use maybe_get_output
25265 // to retrieve the output
25266 }
25267 void set_output_raw_strided(
25268 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25269 TensorOptions options, DimnameList names
25270 ) override {
25271 outputs_[output_idx] = create_out(sizes, strides, options);
25272 if (!names.empty()) {
25273 namedinference::propagate_names(*outputs_[output_idx], names);
25274 }
25275 // super must happen after, so that downstream can use maybe_get_output
25276 // to retrieve the output
25277 }
25278 const Tensor& maybe_get_output(int64_t output_idx) override {
25279 return *outputs_[output_idx];
25280 }
25281 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
25282};
25283at::Tensor wrapper_CPU__upsample_bicubic2d_aa(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
25284structured__upsample_bicubic2d_aa_out_cpu_functional op;
25285op.meta(self, output_size, align_corners, scales_h, scales_w);
25286op.impl(self, output_size, align_corners, scales_h, scales_w, *op.outputs_[0]);
25287return std::move(op.outputs_[0]).take();
25288}
25289struct structured__upsample_bicubic2d_aa_out_cpu_out final : public at::native::structured__upsample_bicubic2d_aa_out_cpu {
25290 structured__upsample_bicubic2d_aa_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
25291 void set_output_strided(
25292 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25293 TensorOptions options, DimnameList names
25294 ) override {
25295 const auto& out = outputs_[output_idx].get();
25296 resize_out(out, sizes, strides, options);
25297 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
25298 if (C10_UNLIKELY(maybe_proxy.has_value())) {
25299 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
25300 }
25301 if (!names.empty()) {
25302 namedinference::propagate_names(outputs_[output_idx], names);
25303 }
25304 // super must happen after, so that downstream can use maybe_get_output
25305 // to retrieve the output
25306 }
25307 void set_output_raw_strided(
25308 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25309 TensorOptions options, DimnameList names
25310 ) override {
25311 const auto& out = outputs_[output_idx].get();
25312 resize_out(out, sizes, strides, options);
25313 if (!names.empty()) {
25314 namedinference::propagate_names(outputs_[output_idx], names);
25315 }
25316 // super must happen after, so that downstream can use maybe_get_output
25317 // to retrieve the output
25318 }
25319 const Tensor& maybe_get_output(int64_t output_idx) override {
25320 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
25321 }
25322 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
25323 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
25324};
25325at::Tensor & wrapper_CPU__upsample_bicubic2d_aa_out_out(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
25326structured__upsample_bicubic2d_aa_out_cpu_out op(out);
25327op.meta(self, output_size, align_corners, scales_h, scales_w);
25328op.impl(self, output_size, align_corners, scales_h, scales_w, op.maybe_get_output(0));
25329if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
25330return out;
25331}
25332struct structured__upsample_bicubic2d_aa_backward_out_cpu_functional final : public at::native::structured__upsample_bicubic2d_aa_backward_out_cpu {
25333 void set_output_strided(
25334 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25335 TensorOptions options, DimnameList names
25336 ) override {
25337 outputs_[output_idx] = create_out(sizes, strides, options);
25338 if (!names.empty()) {
25339 namedinference::propagate_names(*outputs_[output_idx], names);
25340 }
25341 // super must happen after, so that downstream can use maybe_get_output
25342 // to retrieve the output
25343 }
25344 void set_output_raw_strided(
25345 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25346 TensorOptions options, DimnameList names
25347 ) override {
25348 outputs_[output_idx] = create_out(sizes, strides, options);
25349 if (!names.empty()) {
25350 namedinference::propagate_names(*outputs_[output_idx], names);
25351 }
25352 // super must happen after, so that downstream can use maybe_get_output
25353 // to retrieve the output
25354 }
25355 const Tensor& maybe_get_output(int64_t output_idx) override {
25356 return *outputs_[output_idx];
25357 }
25358 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
25359};
25360at::Tensor wrapper_CPU__upsample_bicubic2d_aa_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
25361structured__upsample_bicubic2d_aa_backward_out_cpu_functional op;
25362op.meta(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
25363op.impl(grad_output, output_size, input_size, align_corners, scales_h, scales_w, *op.outputs_[0]);
25364return std::move(op.outputs_[0]).take();
25365}
25366struct structured__upsample_bicubic2d_aa_backward_out_cpu_out final : public at::native::structured__upsample_bicubic2d_aa_backward_out_cpu {
25367 structured__upsample_bicubic2d_aa_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
25368 void set_output_strided(
25369 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25370 TensorOptions options, DimnameList names
25371 ) override {
25372 const auto& out = outputs_[output_idx].get();
25373 resize_out(out, sizes, strides, options);
25374 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
25375 if (C10_UNLIKELY(maybe_proxy.has_value())) {
25376 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
25377 }
25378 if (!names.empty()) {
25379 namedinference::propagate_names(outputs_[output_idx], names);
25380 }
25381 // super must happen after, so that downstream can use maybe_get_output
25382 // to retrieve the output
25383 }
25384 void set_output_raw_strided(
25385 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25386 TensorOptions options, DimnameList names
25387 ) override {
25388 const auto& out = outputs_[output_idx].get();
25389 resize_out(out, sizes, strides, options);
25390 if (!names.empty()) {
25391 namedinference::propagate_names(outputs_[output_idx], names);
25392 }
25393 // super must happen after, so that downstream can use maybe_get_output
25394 // to retrieve the output
25395 }
25396 const Tensor& maybe_get_output(int64_t output_idx) override {
25397 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
25398 }
25399 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
25400 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
25401};
25402at::Tensor & wrapper_CPU__upsample_bicubic2d_aa_backward_out_grad_input(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
25403structured__upsample_bicubic2d_aa_backward_out_cpu_out op(grad_input);
25404op.meta(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
25405op.impl(grad_output, output_size, input_size, align_corners, scales_h, scales_w, op.maybe_get_output(0));
25406if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
25407return grad_input;
25408}
25409struct structured_upsample_trilinear3d_out_cpu_functional final : public at::native::structured_upsample_trilinear3d_out_cpu {
25410 void set_output_strided(
25411 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25412 TensorOptions options, DimnameList names
25413 ) override {
25414 outputs_[output_idx] = create_out(sizes, strides, options);
25415 if (!names.empty()) {
25416 namedinference::propagate_names(*outputs_[output_idx], names);
25417 }
25418 // super must happen after, so that downstream can use maybe_get_output
25419 // to retrieve the output
25420 }
25421 void set_output_raw_strided(
25422 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25423 TensorOptions options, DimnameList names
25424 ) override {
25425 outputs_[output_idx] = create_out(sizes, strides, options);
25426 if (!names.empty()) {
25427 namedinference::propagate_names(*outputs_[output_idx], names);
25428 }
25429 // super must happen after, so that downstream can use maybe_get_output
25430 // to retrieve the output
25431 }
25432 const Tensor& maybe_get_output(int64_t output_idx) override {
25433 return *outputs_[output_idx];
25434 }
25435 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
25436};
25437at::Tensor wrapper_CPU_upsample_trilinear3d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
25438structured_upsample_trilinear3d_out_cpu_functional op;
25439op.meta(self, output_size, align_corners, scales_d, scales_h, scales_w);
25440op.impl(self, output_size, align_corners, scales_d, scales_h, scales_w, *op.outputs_[0]);
25441return std::move(op.outputs_[0]).take();
25442}
25443struct structured_upsample_trilinear3d_out_cpu_out final : public at::native::structured_upsample_trilinear3d_out_cpu {
25444 structured_upsample_trilinear3d_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
25445 void set_output_strided(
25446 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25447 TensorOptions options, DimnameList names
25448 ) override {
25449 const auto& out = outputs_[output_idx].get();
25450 resize_out(out, sizes, strides, options);
25451 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
25452 if (C10_UNLIKELY(maybe_proxy.has_value())) {
25453 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
25454 }
25455 if (!names.empty()) {
25456 namedinference::propagate_names(outputs_[output_idx], names);
25457 }
25458 // super must happen after, so that downstream can use maybe_get_output
25459 // to retrieve the output
25460 }
25461 void set_output_raw_strided(
25462 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25463 TensorOptions options, DimnameList names
25464 ) override {
25465 const auto& out = outputs_[output_idx].get();
25466 resize_out(out, sizes, strides, options);
25467 if (!names.empty()) {
25468 namedinference::propagate_names(outputs_[output_idx], names);
25469 }
25470 // super must happen after, so that downstream can use maybe_get_output
25471 // to retrieve the output
25472 }
25473 const Tensor& maybe_get_output(int64_t output_idx) override {
25474 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
25475 }
25476 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
25477 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
25478};
25479at::Tensor & wrapper_CPU_upsample_trilinear3d_out_out(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
25480structured_upsample_trilinear3d_out_cpu_out op(out);
25481op.meta(self, output_size, align_corners, scales_d, scales_h, scales_w);
25482op.impl(self, output_size, align_corners, scales_d, scales_h, scales_w, op.maybe_get_output(0));
25483if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
25484return out;
25485}
25486struct structured_upsample_trilinear3d_backward_out_cpu_functional final : public at::native::structured_upsample_trilinear3d_backward_out_cpu {
25487 void set_output_strided(
25488 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25489 TensorOptions options, DimnameList names
25490 ) override {
25491 outputs_[output_idx] = create_out(sizes, strides, options);
25492 if (!names.empty()) {
25493 namedinference::propagate_names(*outputs_[output_idx], names);
25494 }
25495 // super must happen after, so that downstream can use maybe_get_output
25496 // to retrieve the output
25497 }
25498 void set_output_raw_strided(
25499 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25500 TensorOptions options, DimnameList names
25501 ) override {
25502 outputs_[output_idx] = create_out(sizes, strides, options);
25503 if (!names.empty()) {
25504 namedinference::propagate_names(*outputs_[output_idx], names);
25505 }
25506 // super must happen after, so that downstream can use maybe_get_output
25507 // to retrieve the output
25508 }
25509 const Tensor& maybe_get_output(int64_t output_idx) override {
25510 return *outputs_[output_idx];
25511 }
25512 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
25513};
25514at::Tensor wrapper_CPU_upsample_trilinear3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
25515structured_upsample_trilinear3d_backward_out_cpu_functional op;
25516op.meta(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w);
25517op.impl(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w, *op.outputs_[0]);
25518return std::move(op.outputs_[0]).take();
25519}
25520struct structured_upsample_trilinear3d_backward_out_cpu_out final : public at::native::structured_upsample_trilinear3d_backward_out_cpu {
25521 structured_upsample_trilinear3d_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
25522 void set_output_strided(
25523 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25524 TensorOptions options, DimnameList names
25525 ) override {
25526 const auto& out = outputs_[output_idx].get();
25527 resize_out(out, sizes, strides, options);
25528 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
25529 if (C10_UNLIKELY(maybe_proxy.has_value())) {
25530 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
25531 }
25532 if (!names.empty()) {
25533 namedinference::propagate_names(outputs_[output_idx], names);
25534 }
25535 // super must happen after, so that downstream can use maybe_get_output
25536 // to retrieve the output
25537 }
25538 void set_output_raw_strided(
25539 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25540 TensorOptions options, DimnameList names
25541 ) override {
25542 const auto& out = outputs_[output_idx].get();
25543 resize_out(out, sizes, strides, options);
25544 if (!names.empty()) {
25545 namedinference::propagate_names(outputs_[output_idx], names);
25546 }
25547 // super must happen after, so that downstream can use maybe_get_output
25548 // to retrieve the output
25549 }
25550 const Tensor& maybe_get_output(int64_t output_idx) override {
25551 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
25552 }
25553 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
25554 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
25555};
25556at::Tensor & wrapper_CPU_upsample_trilinear3d_backward_out_grad_input(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
25557structured_upsample_trilinear3d_backward_out_cpu_out op(grad_input);
25558op.meta(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w);
25559op.impl(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w, op.maybe_get_output(0));
25560if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
25561return grad_input;
25562}
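// The nearest-neighbor family below keeps the same wrapper layout but drops
// align_corners: upsample_nearest1d takes only output_size and an optional scales
// value. The _upsample_nearest_exact* variants share the same wrapper shape and
// presumably differ only in the kernel's source-index computation.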
struct structured_upsample_nearest1d_out_cpu_functional final : public at::native::structured_upsample_nearest1d_out_cpu {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
            namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
            namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
        return *outputs_[output_idx];
    }
    std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
};
at::Tensor wrapper_CPU_upsample_nearest1d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales) {
    structured_upsample_nearest1d_out_cpu_functional op;
    op.meta(self, output_size, scales);
    op.impl(self, output_size, scales, *op.outputs_[0]);
    return std::move(op.outputs_[0]).take();
}
struct structured_upsample_nearest1d_out_cpu_out final : public at::native::structured_upsample_nearest1d_out_cpu {
    structured_upsample_nearest1d_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
        }
        if (!names.empty()) {
            namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
            namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
        return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_upsample_nearest1d_out_out(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales, at::Tensor & out) {
    structured_upsample_nearest1d_out_cpu_out op(out);
    op.meta(self, output_size, scales);
    op.impl(self, output_size, scales, op.maybe_get_output(0));
    if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
    return out;
}
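// Illustrative call sequence for the out-variant wrapper above (a sketch for
// exposition only; in practice these wrappers are reached through the
// dispatcher rather than called directly):
//
//   at::Tensor self = at::rand({1, 3, 16});            // CPU float input
//   at::Tensor out  = at::empty({0}, self.options());  // will be resized
//   wrapper_CPU_upsample_nearest1d_out_out(self, {32}, c10::nullopt, out);
//   // out now has shape {1, 3, 32}, filled by the structured kernel.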
25640struct structured__upsample_nearest_exact1d_out_cpu_functional final : public at::native::structured__upsample_nearest_exact1d_out_cpu {
25641 void set_output_strided(
25642 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25643 TensorOptions options, DimnameList names
25644 ) override {
25645 outputs_[output_idx] = create_out(sizes, strides, options);
25646 if (!names.empty()) {
25647 namedinference::propagate_names(*outputs_[output_idx], names);
25648 }
25649 // super must happen after, so that downstream can use maybe_get_output
25650 // to retrieve the output
25651 }
25652 void set_output_raw_strided(
25653 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25654 TensorOptions options, DimnameList names
25655 ) override {
25656 outputs_[output_idx] = create_out(sizes, strides, options);
25657 if (!names.empty()) {
25658 namedinference::propagate_names(*outputs_[output_idx], names);
25659 }
25660 // super must happen after, so that downstream can use maybe_get_output
25661 // to retrieve the output
25662 }
25663 const Tensor& maybe_get_output(int64_t output_idx) override {
25664 return *outputs_[output_idx];
25665 }
25666 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
25667};
25668at::Tensor wrapper_CPU__upsample_nearest_exact1d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales) {
25669structured__upsample_nearest_exact1d_out_cpu_functional op;
25670op.meta(self, output_size, scales);
25671op.impl(self, output_size, scales, *op.outputs_[0]);
25672return std::move(op.outputs_[0]).take();
25673}
25674struct structured__upsample_nearest_exact1d_out_cpu_out final : public at::native::structured__upsample_nearest_exact1d_out_cpu {
25675 structured__upsample_nearest_exact1d_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
25676 void set_output_strided(
25677 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25678 TensorOptions options, DimnameList names
25679 ) override {
25680 const auto& out = outputs_[output_idx].get();
25681 resize_out(out, sizes, strides, options);
25682 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
25683 if (C10_UNLIKELY(maybe_proxy.has_value())) {
25684 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
25685 }
25686 if (!names.empty()) {
25687 namedinference::propagate_names(outputs_[output_idx], names);
25688 }
25689 // super must happen after, so that downstream can use maybe_get_output
25690 // to retrieve the output
25691 }
25692 void set_output_raw_strided(
25693 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25694 TensorOptions options, DimnameList names
25695 ) override {
25696 const auto& out = outputs_[output_idx].get();
25697 resize_out(out, sizes, strides, options);
25698 if (!names.empty()) {
25699 namedinference::propagate_names(outputs_[output_idx], names);
25700 }
25701 // super must happen after, so that downstream can use maybe_get_output
25702 // to retrieve the output
25703 }
25704 const Tensor& maybe_get_output(int64_t output_idx) override {
25705 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
25706 }
25707 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
25708 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
25709};
25710at::Tensor & wrapper_CPU__upsample_nearest_exact1d_out_out(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales, at::Tensor & out) {
25711structured__upsample_nearest_exact1d_out_cpu_out op(out);
25712op.meta(self, output_size, scales);
25713op.impl(self, output_size, scales, op.maybe_get_output(0));
25714if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
25715return out;
25716}
25717struct structured_upsample_nearest1d_backward_out_cpu_functional final : public at::native::structured_upsample_nearest1d_backward_out_cpu {
25718 void set_output_strided(
25719 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25720 TensorOptions options, DimnameList names
25721 ) override {
25722 outputs_[output_idx] = create_out(sizes, strides, options);
25723 if (!names.empty()) {
25724 namedinference::propagate_names(*outputs_[output_idx], names);
25725 }
25726 // super must happen after, so that downstream can use maybe_get_output
25727 // to retrieve the output
25728 }
25729 void set_output_raw_strided(
25730 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25731 TensorOptions options, DimnameList names
25732 ) override {
25733 outputs_[output_idx] = create_out(sizes, strides, options);
25734 if (!names.empty()) {
25735 namedinference::propagate_names(*outputs_[output_idx], names);
25736 }
25737 // super must happen after, so that downstream can use maybe_get_output
25738 // to retrieve the output
25739 }
25740 const Tensor& maybe_get_output(int64_t output_idx) override {
25741 return *outputs_[output_idx];
25742 }
25743 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
25744};
25745at::Tensor wrapper_CPU_upsample_nearest1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales) {
25746structured_upsample_nearest1d_backward_out_cpu_functional op;
25747op.meta(grad_output, output_size, input_size, scales);
25748op.impl(grad_output, output_size, input_size, scales, *op.outputs_[0]);
25749return std::move(op.outputs_[0]).take();
25750}
25751struct structured_upsample_nearest1d_backward_out_cpu_out final : public at::native::structured_upsample_nearest1d_backward_out_cpu {
25752 structured_upsample_nearest1d_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
25753 void set_output_strided(
25754 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25755 TensorOptions options, DimnameList names
25756 ) override {
25757 const auto& out = outputs_[output_idx].get();
25758 resize_out(out, sizes, strides, options);
25759 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
25760 if (C10_UNLIKELY(maybe_proxy.has_value())) {
25761 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
25762 }
25763 if (!names.empty()) {
25764 namedinference::propagate_names(outputs_[output_idx], names);
25765 }
25766 // super must happen after, so that downstream can use maybe_get_output
25767 // to retrieve the output
25768 }
25769 void set_output_raw_strided(
25770 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25771 TensorOptions options, DimnameList names
25772 ) override {
25773 const auto& out = outputs_[output_idx].get();
25774 resize_out(out, sizes, strides, options);
25775 if (!names.empty()) {
25776 namedinference::propagate_names(outputs_[output_idx], names);
25777 }
25778 // super must happen after, so that downstream can use maybe_get_output
25779 // to retrieve the output
25780 }
25781 const Tensor& maybe_get_output(int64_t output_idx) override {
25782 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
25783 }
25784 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
25785 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
25786};
25787at::Tensor & wrapper_CPU_upsample_nearest1d_backward_out_grad_input(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input) {
25788structured_upsample_nearest1d_backward_out_cpu_out op(grad_input);
25789op.meta(grad_output, output_size, input_size, scales);
25790op.impl(grad_output, output_size, input_size, scales, op.maybe_get_output(0));
25791if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
25792return grad_input;
25793}
25794struct structured__upsample_nearest_exact1d_backward_out_cpu_functional final : public at::native::structured__upsample_nearest_exact1d_backward_out_cpu {
25795 void set_output_strided(
25796 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25797 TensorOptions options, DimnameList names
25798 ) override {
25799 outputs_[output_idx] = create_out(sizes, strides, options);
25800 if (!names.empty()) {
25801 namedinference::propagate_names(*outputs_[output_idx], names);
25802 }
25803 // super must happen after, so that downstream can use maybe_get_output
25804 // to retrieve the output
25805 }
25806 void set_output_raw_strided(
25807 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25808 TensorOptions options, DimnameList names
25809 ) override {
25810 outputs_[output_idx] = create_out(sizes, strides, options);
25811 if (!names.empty()) {
25812 namedinference::propagate_names(*outputs_[output_idx], names);
25813 }
25814 // super must happen after, so that downstream can use maybe_get_output
25815 // to retrieve the output
25816 }
25817 const Tensor& maybe_get_output(int64_t output_idx) override {
25818 return *outputs_[output_idx];
25819 }
25820 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
25821};
25822at::Tensor wrapper_CPU__upsample_nearest_exact1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales) {
25823structured__upsample_nearest_exact1d_backward_out_cpu_functional op;
25824op.meta(grad_output, output_size, input_size, scales);
25825op.impl(grad_output, output_size, input_size, scales, *op.outputs_[0]);
25826return std::move(op.outputs_[0]).take();
25827}
25828struct structured__upsample_nearest_exact1d_backward_out_cpu_out final : public at::native::structured__upsample_nearest_exact1d_backward_out_cpu {
25829 structured__upsample_nearest_exact1d_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
25830 void set_output_strided(
25831 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25832 TensorOptions options, DimnameList names
25833 ) override {
25834 const auto& out = outputs_[output_idx].get();
25835 resize_out(out, sizes, strides, options);
25836 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
25837 if (C10_UNLIKELY(maybe_proxy.has_value())) {
25838 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
25839 }
25840 if (!names.empty()) {
25841 namedinference::propagate_names(outputs_[output_idx], names);
25842 }
25843 // super must happen after, so that downstream can use maybe_get_output
25844 // to retrieve the output
25845 }
25846 void set_output_raw_strided(
25847 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25848 TensorOptions options, DimnameList names
25849 ) override {
25850 const auto& out = outputs_[output_idx].get();
25851 resize_out(out, sizes, strides, options);
25852 if (!names.empty()) {
25853 namedinference::propagate_names(outputs_[output_idx], names);
25854 }
25855 // super must happen after, so that downstream can use maybe_get_output
25856 // to retrieve the output
25857 }
25858 const Tensor& maybe_get_output(int64_t output_idx) override {
25859 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
25860 }
25861 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
25862 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
25863};
25864at::Tensor & wrapper_CPU__upsample_nearest_exact1d_backward_out_grad_input(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input) {
25865structured__upsample_nearest_exact1d_backward_out_cpu_out op(grad_input);
25866op.meta(grad_output, output_size, input_size, scales);
25867op.impl(grad_output, output_size, input_size, scales, op.maybe_get_output(0));
25868if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
25869return grad_input;
25870}
25871struct structured_upsample_nearest2d_out_cpu_functional final : public at::native::structured_upsample_nearest2d_out_cpu {
25872 void set_output_strided(
25873 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25874 TensorOptions options, DimnameList names
25875 ) override {
25876 outputs_[output_idx] = create_out(sizes, strides, options);
25877 if (!names.empty()) {
25878 namedinference::propagate_names(*outputs_[output_idx], names);
25879 }
25880 // super must happen after, so that downstream can use maybe_get_output
25881 // to retrieve the output
25882 }
25883 void set_output_raw_strided(
25884 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25885 TensorOptions options, DimnameList names
25886 ) override {
25887 outputs_[output_idx] = create_out(sizes, strides, options);
25888 if (!names.empty()) {
25889 namedinference::propagate_names(*outputs_[output_idx], names);
25890 }
25891 // super must happen after, so that downstream can use maybe_get_output
25892 // to retrieve the output
25893 }
25894 const Tensor& maybe_get_output(int64_t output_idx) override {
25895 return *outputs_[output_idx];
25896 }
25897 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
25898};
25899at::Tensor wrapper_CPU_upsample_nearest2d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
25900structured_upsample_nearest2d_out_cpu_functional op;
25901op.meta(self, output_size, scales_h, scales_w);
25902op.impl(self, output_size, scales_h, scales_w, *op.outputs_[0]);
25903return std::move(op.outputs_[0]).take();
25904}
25905struct structured_upsample_nearest2d_out_cpu_out final : public at::native::structured_upsample_nearest2d_out_cpu {
25906 structured_upsample_nearest2d_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
25907 void set_output_strided(
25908 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25909 TensorOptions options, DimnameList names
25910 ) override {
25911 const auto& out = outputs_[output_idx].get();
25912 resize_out(out, sizes, strides, options);
25913 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
25914 if (C10_UNLIKELY(maybe_proxy.has_value())) {
25915 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
25916 }
25917 if (!names.empty()) {
25918 namedinference::propagate_names(outputs_[output_idx], names);
25919 }
25920 // super must happen after, so that downstream can use maybe_get_output
25921 // to retrieve the output
25922 }
25923 void set_output_raw_strided(
25924 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25925 TensorOptions options, DimnameList names
25926 ) override {
25927 const auto& out = outputs_[output_idx].get();
25928 resize_out(out, sizes, strides, options);
25929 if (!names.empty()) {
25930 namedinference::propagate_names(outputs_[output_idx], names);
25931 }
25932 // super must happen after, so that downstream can use maybe_get_output
25933 // to retrieve the output
25934 }
25935 const Tensor& maybe_get_output(int64_t output_idx) override {
25936 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
25937 }
25938 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
25939 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
25940};
25941at::Tensor & wrapper_CPU_upsample_nearest2d_out_out(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
25942structured_upsample_nearest2d_out_cpu_out op(out);
25943op.meta(self, output_size, scales_h, scales_w);
25944op.impl(self, output_size, scales_h, scales_w, op.maybe_get_output(0));
25945if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
25946return out;
25947}
25948struct structured__upsample_nearest_exact2d_out_cpu_functional final : public at::native::structured__upsample_nearest_exact2d_out_cpu {
25949 void set_output_strided(
25950 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25951 TensorOptions options, DimnameList names
25952 ) override {
25953 outputs_[output_idx] = create_out(sizes, strides, options);
25954 if (!names.empty()) {
25955 namedinference::propagate_names(*outputs_[output_idx], names);
25956 }
25957 // super must happen after, so that downstream can use maybe_get_output
25958 // to retrieve the output
25959 }
25960 void set_output_raw_strided(
25961 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25962 TensorOptions options, DimnameList names
25963 ) override {
25964 outputs_[output_idx] = create_out(sizes, strides, options);
25965 if (!names.empty()) {
25966 namedinference::propagate_names(*outputs_[output_idx], names);
25967 }
25968 // super must happen after, so that downstream can use maybe_get_output
25969 // to retrieve the output
25970 }
25971 const Tensor& maybe_get_output(int64_t output_idx) override {
25972 return *outputs_[output_idx];
25973 }
25974 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
25975};
25976at::Tensor wrapper_CPU__upsample_nearest_exact2d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
25977structured__upsample_nearest_exact2d_out_cpu_functional op;
25978op.meta(self, output_size, scales_h, scales_w);
25979op.impl(self, output_size, scales_h, scales_w, *op.outputs_[0]);
25980return std::move(op.outputs_[0]).take();
25981}
25982struct structured__upsample_nearest_exact2d_out_cpu_out final : public at::native::structured__upsample_nearest_exact2d_out_cpu {
25983 structured__upsample_nearest_exact2d_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
25984 void set_output_strided(
25985 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
25986 TensorOptions options, DimnameList names
25987 ) override {
25988 const auto& out = outputs_[output_idx].get();
25989 resize_out(out, sizes, strides, options);
25990 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
25991 if (C10_UNLIKELY(maybe_proxy.has_value())) {
25992 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
25993 }
25994 if (!names.empty()) {
25995 namedinference::propagate_names(outputs_[output_idx], names);
25996 }
25997 // super must happen after, so that downstream can use maybe_get_output
25998 // to retrieve the output
25999 }
26000 void set_output_raw_strided(
26001 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26002 TensorOptions options, DimnameList names
26003 ) override {
26004 const auto& out = outputs_[output_idx].get();
26005 resize_out(out, sizes, strides, options);
26006 if (!names.empty()) {
26007 namedinference::propagate_names(outputs_[output_idx], names);
26008 }
26009 // super must happen after, so that downstream can use maybe_get_output
26010 // to retrieve the output
26011 }
26012 const Tensor& maybe_get_output(int64_t output_idx) override {
26013 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
26014 }
26015 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
26016 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
26017};
26018at::Tensor & wrapper_CPU__upsample_nearest_exact2d_out_out(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
26019structured__upsample_nearest_exact2d_out_cpu_out op(out);
26020op.meta(self, output_size, scales_h, scales_w);
26021op.impl(self, output_size, scales_h, scales_w, op.maybe_get_output(0));
26022if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
26023return out;
26024}
26025struct structured_upsample_nearest2d_backward_out_cpu_functional final : public at::native::structured_upsample_nearest2d_backward_out_cpu {
26026 void set_output_strided(
26027 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26028 TensorOptions options, DimnameList names
26029 ) override {
26030 outputs_[output_idx] = create_out(sizes, strides, options);
26031 if (!names.empty()) {
26032 namedinference::propagate_names(*outputs_[output_idx], names);
26033 }
26034 // super must happen after, so that downstream can use maybe_get_output
26035 // to retrieve the output
26036 }
26037 void set_output_raw_strided(
26038 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26039 TensorOptions options, DimnameList names
26040 ) override {
26041 outputs_[output_idx] = create_out(sizes, strides, options);
26042 if (!names.empty()) {
26043 namedinference::propagate_names(*outputs_[output_idx], names);
26044 }
26045 // super must happen after, so that downstream can use maybe_get_output
26046 // to retrieve the output
26047 }
26048 const Tensor& maybe_get_output(int64_t output_idx) override {
26049 return *outputs_[output_idx];
26050 }
26051 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
26052};
26053at::Tensor wrapper_CPU_upsample_nearest2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
26054structured_upsample_nearest2d_backward_out_cpu_functional op;
26055op.meta(grad_output, output_size, input_size, scales_h, scales_w);
26056op.impl(grad_output, output_size, input_size, scales_h, scales_w, *op.outputs_[0]);
26057return std::move(op.outputs_[0]).take();
26058}
26059struct structured_upsample_nearest2d_backward_out_cpu_out final : public at::native::structured_upsample_nearest2d_backward_out_cpu {
26060 structured_upsample_nearest2d_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
26061 void set_output_strided(
26062 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26063 TensorOptions options, DimnameList names
26064 ) override {
26065 const auto& out = outputs_[output_idx].get();
26066 resize_out(out, sizes, strides, options);
26067 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
26068 if (C10_UNLIKELY(maybe_proxy.has_value())) {
26069 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
26070 }
26071 if (!names.empty()) {
26072 namedinference::propagate_names(outputs_[output_idx], names);
26073 }
26074 // super must happen after, so that downstream can use maybe_get_output
26075 // to retrieve the output
26076 }
26077 void set_output_raw_strided(
26078 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26079 TensorOptions options, DimnameList names
26080 ) override {
26081 const auto& out = outputs_[output_idx].get();
26082 resize_out(out, sizes, strides, options);
26083 if (!names.empty()) {
26084 namedinference::propagate_names(outputs_[output_idx], names);
26085 }
26086 // super must happen after, so that downstream can use maybe_get_output
26087 // to retrieve the output
26088 }
26089 const Tensor& maybe_get_output(int64_t output_idx) override {
26090 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
26091 }
26092 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
26093 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
26094};
26095at::Tensor & wrapper_CPU_upsample_nearest2d_backward_out_grad_input(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
26096structured_upsample_nearest2d_backward_out_cpu_out op(grad_input);
26097op.meta(grad_output, output_size, input_size, scales_h, scales_w);
26098op.impl(grad_output, output_size, input_size, scales_h, scales_w, op.maybe_get_output(0));
26099if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
26100return grad_input;
26101}
26102struct structured__upsample_nearest_exact2d_backward_out_cpu_functional final : public at::native::structured__upsample_nearest_exact2d_backward_out_cpu {
26103 void set_output_strided(
26104 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26105 TensorOptions options, DimnameList names
26106 ) override {
26107 outputs_[output_idx] = create_out(sizes, strides, options);
26108 if (!names.empty()) {
26109 namedinference::propagate_names(*outputs_[output_idx], names);
26110 }
26111 // super must happen after, so that downstream can use maybe_get_output
26112 // to retrieve the output
26113 }
26114 void set_output_raw_strided(
26115 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26116 TensorOptions options, DimnameList names
26117 ) override {
26118 outputs_[output_idx] = create_out(sizes, strides, options);
26119 if (!names.empty()) {
26120 namedinference::propagate_names(*outputs_[output_idx], names);
26121 }
26122 // super must happen after, so that downstream can use maybe_get_output
26123 // to retrieve the output
26124 }
26125 const Tensor& maybe_get_output(int64_t output_idx) override {
26126 return *outputs_[output_idx];
26127 }
26128 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
26129};
26130at::Tensor wrapper_CPU__upsample_nearest_exact2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
26131structured__upsample_nearest_exact2d_backward_out_cpu_functional op;
26132op.meta(grad_output, output_size, input_size, scales_h, scales_w);
26133op.impl(grad_output, output_size, input_size, scales_h, scales_w, *op.outputs_[0]);
26134return std::move(op.outputs_[0]).take();
26135}
26136struct structured__upsample_nearest_exact2d_backward_out_cpu_out final : public at::native::structured__upsample_nearest_exact2d_backward_out_cpu {
26137 structured__upsample_nearest_exact2d_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
26138 void set_output_strided(
26139 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26140 TensorOptions options, DimnameList names
26141 ) override {
26142 const auto& out = outputs_[output_idx].get();
26143 resize_out(out, sizes, strides, options);
26144 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
26145 if (C10_UNLIKELY(maybe_proxy.has_value())) {
26146 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
26147 }
26148 if (!names.empty()) {
26149 namedinference::propagate_names(outputs_[output_idx], names);
26150 }
26151 // super must happen after, so that downstream can use maybe_get_output
26152 // to retrieve the output
26153 }
26154 void set_output_raw_strided(
26155 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26156 TensorOptions options, DimnameList names
26157 ) override {
26158 const auto& out = outputs_[output_idx].get();
26159 resize_out(out, sizes, strides, options);
26160 if (!names.empty()) {
26161 namedinference::propagate_names(outputs_[output_idx], names);
26162 }
26163 // super must happen after, so that downstream can use maybe_get_output
26164 // to retrieve the output
26165 }
26166 const Tensor& maybe_get_output(int64_t output_idx) override {
26167 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
26168 }
26169 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
26170 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
26171};
26172at::Tensor & wrapper_CPU__upsample_nearest_exact2d_backward_out_grad_input(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
26173structured__upsample_nearest_exact2d_backward_out_cpu_out op(grad_input);
26174op.meta(grad_output, output_size, input_size, scales_h, scales_w);
26175op.impl(grad_output, output_size, input_size, scales_h, scales_w, op.maybe_get_output(0));
26176if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
26177return grad_input;
26178}
26179struct structured_upsample_nearest3d_out_cpu_functional final : public at::native::structured_upsample_nearest3d_out_cpu {
26180 void set_output_strided(
26181 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26182 TensorOptions options, DimnameList names
26183 ) override {
26184 outputs_[output_idx] = create_out(sizes, strides, options);
26185 if (!names.empty()) {
26186 namedinference::propagate_names(*outputs_[output_idx], names);
26187 }
26188 // super must happen after, so that downstream can use maybe_get_output
26189 // to retrieve the output
26190 }
26191 void set_output_raw_strided(
26192 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26193 TensorOptions options, DimnameList names
26194 ) override {
26195 outputs_[output_idx] = create_out(sizes, strides, options);
26196 if (!names.empty()) {
26197 namedinference::propagate_names(*outputs_[output_idx], names);
26198 }
26199 // super must happen after, so that downstream can use maybe_get_output
26200 // to retrieve the output
26201 }
26202 const Tensor& maybe_get_output(int64_t output_idx) override {
26203 return *outputs_[output_idx];
26204 }
26205 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
26206};
26207at::Tensor wrapper_CPU_upsample_nearest3d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
26208structured_upsample_nearest3d_out_cpu_functional op;
26209op.meta(self, output_size, scales_d, scales_h, scales_w);
26210op.impl(self, output_size, scales_d, scales_h, scales_w, *op.outputs_[0]);
26211return std::move(op.outputs_[0]).take();
26212}
26213struct structured_upsample_nearest3d_out_cpu_out final : public at::native::structured_upsample_nearest3d_out_cpu {
26214 structured_upsample_nearest3d_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
26215 void set_output_strided(
26216 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26217 TensorOptions options, DimnameList names
26218 ) override {
26219 const auto& out = outputs_[output_idx].get();
26220 resize_out(out, sizes, strides, options);
26221 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
26222 if (C10_UNLIKELY(maybe_proxy.has_value())) {
26223 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
26224 }
26225 if (!names.empty()) {
26226 namedinference::propagate_names(outputs_[output_idx], names);
26227 }
26228 // super must happen after, so that downstream can use maybe_get_output
26229 // to retrieve the output
26230 }
26231 void set_output_raw_strided(
26232 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26233 TensorOptions options, DimnameList names
26234 ) override {
26235 const auto& out = outputs_[output_idx].get();
26236 resize_out(out, sizes, strides, options);
26237 if (!names.empty()) {
26238 namedinference::propagate_names(outputs_[output_idx], names);
26239 }
26240 // super must happen after, so that downstream can use maybe_get_output
26241 // to retrieve the output
26242 }
26243 const Tensor& maybe_get_output(int64_t output_idx) override {
26244 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
26245 }
26246 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
26247 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
26248};
26249at::Tensor & wrapper_CPU_upsample_nearest3d_out_out(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
26250structured_upsample_nearest3d_out_cpu_out op(out);
26251op.meta(self, output_size, scales_d, scales_h, scales_w);
26252op.impl(self, output_size, scales_d, scales_h, scales_w, op.maybe_get_output(0));
26253if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
26254return out;
26255}
26256struct structured__upsample_nearest_exact3d_out_cpu_functional final : public at::native::structured__upsample_nearest_exact3d_out_cpu {
26257 void set_output_strided(
26258 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26259 TensorOptions options, DimnameList names
26260 ) override {
26261 outputs_[output_idx] = create_out(sizes, strides, options);
26262 if (!names.empty()) {
26263 namedinference::propagate_names(*outputs_[output_idx], names);
26264 }
26265 // super must happen after, so that downstream can use maybe_get_output
26266 // to retrieve the output
26267 }
26268 void set_output_raw_strided(
26269 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26270 TensorOptions options, DimnameList names
26271 ) override {
26272 outputs_[output_idx] = create_out(sizes, strides, options);
26273 if (!names.empty()) {
26274 namedinference::propagate_names(*outputs_[output_idx], names);
26275 }
26276 // super must happen after, so that downstream can use maybe_get_output
26277 // to retrieve the output
26278 }
26279 const Tensor& maybe_get_output(int64_t output_idx) override {
26280 return *outputs_[output_idx];
26281 }
26282 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
26283};
26284at::Tensor wrapper_CPU__upsample_nearest_exact3d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
26285structured__upsample_nearest_exact3d_out_cpu_functional op;
26286op.meta(self, output_size, scales_d, scales_h, scales_w);
26287op.impl(self, output_size, scales_d, scales_h, scales_w, *op.outputs_[0]);
26288return std::move(op.outputs_[0]).take();
26289}
26290struct structured__upsample_nearest_exact3d_out_cpu_out final : public at::native::structured__upsample_nearest_exact3d_out_cpu {
26291 structured__upsample_nearest_exact3d_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
26292 void set_output_strided(
26293 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26294 TensorOptions options, DimnameList names
26295 ) override {
26296 const auto& out = outputs_[output_idx].get();
26297 resize_out(out, sizes, strides, options);
26298 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
26299 if (C10_UNLIKELY(maybe_proxy.has_value())) {
26300 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
26301 }
26302 if (!names.empty()) {
26303 namedinference::propagate_names(outputs_[output_idx], names);
26304 }
26305 // super must happen after, so that downstream can use maybe_get_output
26306 // to retrieve the output
26307 }
26308 void set_output_raw_strided(
26309 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26310 TensorOptions options, DimnameList names
26311 ) override {
26312 const auto& out = outputs_[output_idx].get();
26313 resize_out(out, sizes, strides, options);
26314 if (!names.empty()) {
26315 namedinference::propagate_names(outputs_[output_idx], names);
26316 }
26317 // super must happen after, so that downstream can use maybe_get_output
26318 // to retrieve the output
26319 }
26320 const Tensor& maybe_get_output(int64_t output_idx) override {
26321 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
26322 }
26323 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
26324 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
26325};
26326at::Tensor & wrapper_CPU__upsample_nearest_exact3d_out_out(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
26327structured__upsample_nearest_exact3d_out_cpu_out op(out);
26328op.meta(self, output_size, scales_d, scales_h, scales_w);
26329op.impl(self, output_size, scales_d, scales_h, scales_w, op.maybe_get_output(0));
26330if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
26331return out;
26332}
26333struct structured_upsample_nearest3d_backward_out_cpu_functional final : public at::native::structured_upsample_nearest3d_backward_out_cpu {
26334 void set_output_strided(
26335 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26336 TensorOptions options, DimnameList names
26337 ) override {
26338 outputs_[output_idx] = create_out(sizes, strides, options);
26339 if (!names.empty()) {
26340 namedinference::propagate_names(*outputs_[output_idx], names);
26341 }
26342 // super must happen after, so that downstream can use maybe_get_output
26343 // to retrieve the output
26344 }
26345 void set_output_raw_strided(
26346 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26347 TensorOptions options, DimnameList names
26348 ) override {
26349 outputs_[output_idx] = create_out(sizes, strides, options);
26350 if (!names.empty()) {
26351 namedinference::propagate_names(*outputs_[output_idx], names);
26352 }
26353 // super must happen after, so that downstream can use maybe_get_output
26354 // to retrieve the output
26355 }
26356 const Tensor& maybe_get_output(int64_t output_idx) override {
26357 return *outputs_[output_idx];
26358 }
26359 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
26360};
26361at::Tensor wrapper_CPU_upsample_nearest3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
26362structured_upsample_nearest3d_backward_out_cpu_functional op;
26363op.meta(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
26364op.impl(grad_output, output_size, input_size, scales_d, scales_h, scales_w, *op.outputs_[0]);
26365return std::move(op.outputs_[0]).take();
26366}
26367struct structured_upsample_nearest3d_backward_out_cpu_out final : public at::native::structured_upsample_nearest3d_backward_out_cpu {
26368 structured_upsample_nearest3d_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
26369 void set_output_strided(
26370 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26371 TensorOptions options, DimnameList names
26372 ) override {
26373 const auto& out = outputs_[output_idx].get();
26374 resize_out(out, sizes, strides, options);
26375 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
26376 if (C10_UNLIKELY(maybe_proxy.has_value())) {
26377 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
26378 }
26379 if (!names.empty()) {
26380 namedinference::propagate_names(outputs_[output_idx], names);
26381 }
26382 // super must happen after, so that downstream can use maybe_get_output
26383 // to retrieve the output
26384 }
26385 void set_output_raw_strided(
26386 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26387 TensorOptions options, DimnameList names
26388 ) override {
26389 const auto& out = outputs_[output_idx].get();
26390 resize_out(out, sizes, strides, options);
26391 if (!names.empty()) {
26392 namedinference::propagate_names(outputs_[output_idx], names);
26393 }
26394 // super must happen after, so that downstream can use maybe_get_output
26395 // to retrieve the output
26396 }
26397 const Tensor& maybe_get_output(int64_t output_idx) override {
26398 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
26399 }
26400 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
26401 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
26402};
26403at::Tensor & wrapper_CPU_upsample_nearest3d_backward_out_grad_input(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
26404structured_upsample_nearest3d_backward_out_cpu_out op(grad_input);
26405op.meta(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
26406op.impl(grad_output, output_size, input_size, scales_d, scales_h, scales_w, op.maybe_get_output(0));
26407if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
26408return grad_input;
26409}
26410struct structured__upsample_nearest_exact3d_backward_out_cpu_functional final : public at::native::structured__upsample_nearest_exact3d_backward_out_cpu {
26411 void set_output_strided(
26412 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26413 TensorOptions options, DimnameList names
26414 ) override {
26415 outputs_[output_idx] = create_out(sizes, strides, options);
26416 if (!names.empty()) {
26417 namedinference::propagate_names(*outputs_[output_idx], names);
26418 }
26419 // super must happen after, so that downstream can use maybe_get_output
26420 // to retrieve the output
26421 }
26422 void set_output_raw_strided(
26423 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26424 TensorOptions options, DimnameList names
26425 ) override {
26426 outputs_[output_idx] = create_out(sizes, strides, options);
26427 if (!names.empty()) {
26428 namedinference::propagate_names(*outputs_[output_idx], names);
26429 }
26430 // super must happen after, so that downstream can use maybe_get_output
26431 // to retrieve the output
26432 }
26433 const Tensor& maybe_get_output(int64_t output_idx) override {
26434 return *outputs_[output_idx];
26435 }
26436 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
26437};
26438at::Tensor wrapper_CPU__upsample_nearest_exact3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
26439structured__upsample_nearest_exact3d_backward_out_cpu_functional op;
26440op.meta(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
26441op.impl(grad_output, output_size, input_size, scales_d, scales_h, scales_w, *op.outputs_[0]);
26442return std::move(op.outputs_[0]).take();
26443}
26444struct structured__upsample_nearest_exact3d_backward_out_cpu_out final : public at::native::structured__upsample_nearest_exact3d_backward_out_cpu {
26445 structured__upsample_nearest_exact3d_backward_out_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
26446 void set_output_strided(
26447 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26448 TensorOptions options, DimnameList names
26449 ) override {
26450 const auto& out = outputs_[output_idx].get();
26451 resize_out(out, sizes, strides, options);
26452 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
26453 if (C10_UNLIKELY(maybe_proxy.has_value())) {
26454 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
26455 }
26456 if (!names.empty()) {
26457 namedinference::propagate_names(outputs_[output_idx], names);
26458 }
26459 // super must happen after, so that downstream can use maybe_get_output
26460 // to retrieve the output
26461 }
26462 void set_output_raw_strided(
26463 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26464 TensorOptions options, DimnameList names
26465 ) override {
26466 const auto& out = outputs_[output_idx].get();
26467 resize_out(out, sizes, strides, options);
26468 if (!names.empty()) {
26469 namedinference::propagate_names(outputs_[output_idx], names);
26470 }
26471 // super must happen after, so that downstream can use maybe_get_output
26472 // to retrieve the output
26473 }
26474 const Tensor& maybe_get_output(int64_t output_idx) override {
26475 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
26476 }
26477 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
26478 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
26479};
26480at::Tensor & wrapper_CPU__upsample_nearest_exact3d_backward_out_grad_input(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
26481structured__upsample_nearest_exact3d_backward_out_cpu_out op(grad_input);
26482op.meta(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
26483op.impl(grad_output, output_size, input_size, scales_d, scales_h, scales_w, op.maybe_get_output(0));
26484if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
26485return grad_input;
26486}
struct structured_sigmoid_backward_out_functional final : public at::native::structured_sigmoid_backward_out {
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
            namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sigmoid_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        outputs_[output_idx] = create_out(sizes, strides, options);
        if (!names.empty()) {
            namedinference::propagate_names(*outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sigmoid_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
        return *outputs_[output_idx];
    }
    std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
};
at::Tensor wrapper_CPU_sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & output) {
    structured_sigmoid_backward_out_functional op;
    op.meta(grad_output, output);
    op.impl(grad_output, output, *op.outputs_[0]);
    return std::move(op.outputs_[0]).take();
}
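// Unlike the upsample kernels above, sigmoid_backward (and the logit/tanh
// backward kernels that follow) forward set_output_* to
// at::native::structured_sigmoid_backward_out::set_output_raw_strided after
// allocating or resizing the output, which appears to let the underlying
// structured implementation record the tensor it will write to, consistent
// with the "super must happen after, so that downstream can use
// maybe_get_output" comment emitted by the codegen.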
struct structured_sigmoid_backward_out_out final : public at::native::structured_sigmoid_backward_out {
    structured_sigmoid_backward_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
    void set_output_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
        if (C10_UNLIKELY(maybe_proxy.has_value())) {
            proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
        }
        if (!names.empty()) {
            namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sigmoid_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    void set_output_raw_strided(
        int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
        TensorOptions options, DimnameList names
    ) override {
        const auto& out = outputs_[output_idx].get();
        resize_out(out, sizes, strides, options);
        if (!names.empty()) {
            namedinference::propagate_names(outputs_[output_idx], names);
        }
        // super must happen after, so that downstream can use maybe_get_output
        // to retrieve the output
        at::native::structured_sigmoid_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
    }
    const Tensor& maybe_get_output(int64_t output_idx) override {
        return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
    }
    std::array<std::reference_wrapper<Tensor>, 1> outputs_;
    std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
};
at::Tensor & wrapper_CPU_sigmoid_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input) {
    structured_sigmoid_backward_out_out op(grad_input);
    op.meta(grad_output, output);
    op.impl(grad_output, output, op.maybe_get_output(0));
    if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
    return grad_input;
}
26568struct structured_logit_backward_out_functional final : public at::native::structured_logit_backward_out {
26569 void set_output_strided(
26570 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26571 TensorOptions options, DimnameList names
26572 ) override {
26573 outputs_[output_idx] = create_out(sizes, strides, options);
26574 if (!names.empty()) {
26575 namedinference::propagate_names(*outputs_[output_idx], names);
26576 }
26577 // super must happen after, so that downstream can use maybe_get_output
26578 // to retrieve the output
26579 at::native::structured_logit_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
26580 }
26581 void set_output_raw_strided(
26582 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26583 TensorOptions options, DimnameList names
26584 ) override {
26585 outputs_[output_idx] = create_out(sizes, strides, options);
26586 if (!names.empty()) {
26587 namedinference::propagate_names(*outputs_[output_idx], names);
26588 }
26589 // super must happen after, so that downstream can use maybe_get_output
26590 // to retrieve the output
26591 at::native::structured_logit_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
26592 }
26593 const Tensor& maybe_get_output(int64_t output_idx) override {
26594 return *outputs_[output_idx];
26595 }
26596 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
26597};
26598at::Tensor wrapper_CPU_logit_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps) {
26599structured_logit_backward_out_functional op;
26600op.meta(grad_output, self, eps);
26601op.impl(grad_output, self, eps, *op.outputs_[0]);
26602return std::move(op.outputs_[0]).take();
26603}
26604struct structured_logit_backward_out_out final : public at::native::structured_logit_backward_out {
26605 structured_logit_backward_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
26606 void set_output_strided(
26607 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26608 TensorOptions options, DimnameList names
26609 ) override {
26610 const auto& out = outputs_[output_idx].get();
26611 resize_out(out, sizes, strides, options);
26612 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
26613 if (C10_UNLIKELY(maybe_proxy.has_value())) {
26614 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
26615 }
26616 if (!names.empty()) {
26617 namedinference::propagate_names(outputs_[output_idx], names);
26618 }
26619 // super must happen after, so that downstream can use maybe_get_output
26620 // to retrieve the output
26621 at::native::structured_logit_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
26622 }
26623 void set_output_raw_strided(
26624 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26625 TensorOptions options, DimnameList names
26626 ) override {
26627 const auto& out = outputs_[output_idx].get();
26628 resize_out(out, sizes, strides, options);
26629 if (!names.empty()) {
26630 namedinference::propagate_names(outputs_[output_idx], names);
26631 }
26632 // super must happen after, so that downstream can use maybe_get_output
26633 // to retrieve the output
26634 at::native::structured_logit_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
26635 }
26636 const Tensor& maybe_get_output(int64_t output_idx) override {
26637 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
26638 }
26639 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
26640 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
26641};
26642at::Tensor & wrapper_CPU_logit_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps, at::Tensor & grad_input) {
26643structured_logit_backward_out_out op(grad_input);
26644op.meta(grad_output, self, eps);
26645op.impl(grad_output, self, eps, op.maybe_get_output(0));
26646if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
26647return grad_input;
26648}
26649struct structured_tanh_backward_out_functional final : public at::native::structured_tanh_backward_out {
26650 void set_output_strided(
26651 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26652 TensorOptions options, DimnameList names
26653 ) override {
26654 outputs_[output_idx] = create_out(sizes, strides, options);
26655 if (!names.empty()) {
26656 namedinference::propagate_names(*outputs_[output_idx], names);
26657 }
26658 // super must happen after, so that downstream can use maybe_get_output
26659 // to retrieve the output
26660 at::native::structured_tanh_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
26661 }
26662 void set_output_raw_strided(
26663 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26664 TensorOptions options, DimnameList names
26665 ) override {
26666 outputs_[output_idx] = create_out(sizes, strides, options);
26667 if (!names.empty()) {
26668 namedinference::propagate_names(*outputs_[output_idx], names);
26669 }
26670 // super must happen after, so that downstream can use maybe_get_output
26671 // to retrieve the output
26672 at::native::structured_tanh_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
26673 }
26674 const Tensor& maybe_get_output(int64_t output_idx) override {
26675 return *outputs_[output_idx];
26676 }
26677 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
26678};
26679at::Tensor wrapper_CPU_tanh_backward(const at::Tensor & grad_output, const at::Tensor & output) {
26680structured_tanh_backward_out_functional op;
26681op.meta(grad_output, output);
26682op.impl(grad_output, output, *op.outputs_[0]);
26683return std::move(op.outputs_[0]).take();
26684}
26685struct structured_tanh_backward_out_out final : public at::native::structured_tanh_backward_out {
26686 structured_tanh_backward_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
26687 void set_output_strided(
26688 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26689 TensorOptions options, DimnameList names
26690 ) override {
26691 const auto& out = outputs_[output_idx].get();
26692 resize_out(out, sizes, strides, options);
26693 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
26694 if (C10_UNLIKELY(maybe_proxy.has_value())) {
26695 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
26696 }
26697 if (!names.empty()) {
26698 namedinference::propagate_names(outputs_[output_idx], names);
26699 }
26700 // super must happen after, so that downstream can use maybe_get_output
26701 // to retrieve the output
26702 at::native::structured_tanh_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
26703 }
26704 void set_output_raw_strided(
26705 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26706 TensorOptions options, DimnameList names
26707 ) override {
26708 const auto& out = outputs_[output_idx].get();
26709 resize_out(out, sizes, strides, options);
26710 if (!names.empty()) {
26711 namedinference::propagate_names(outputs_[output_idx], names);
26712 }
26713 // super must happen after, so that downstream can use maybe_get_output
26714 // to retrieve the output
26715 at::native::structured_tanh_backward_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
26716 }
26717 const Tensor& maybe_get_output(int64_t output_idx) override {
26718 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
26719 }
26720 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
26721 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
26722};
26723at::Tensor & wrapper_CPU_tanh_backward_out_grad_input(const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input) {
26724structured_tanh_backward_out_out op(grad_input);
26725op.meta(grad_output, output);
26726op.impl(grad_output, output, op.maybe_get_output(0));
26727if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
26728return grad_input;
26729}
26730struct structured_slow_conv_transpose2d_structured_cpu_functional final : public at::native::structured_slow_conv_transpose2d_structured_cpu {
26731 void set_output_strided(
26732 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26733 TensorOptions options, DimnameList names
26734 ) override {
26735 outputs_[output_idx] = create_out(sizes, strides, options);
26736 if (!names.empty()) {
26737 namedinference::propagate_names(*outputs_[output_idx], names);
26738 }
26739 // super must happen after, so that downstream can use maybe_get_output
26740 // to retrieve the output
26741 }
26742 void set_output_raw_strided(
26743 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26744 TensorOptions options, DimnameList names
26745 ) override {
26746 outputs_[output_idx] = create_out(sizes, strides, options);
26747 if (!names.empty()) {
26748 namedinference::propagate_names(*outputs_[output_idx], names);
26749 }
26750 // super must happen after, so that downstream can use maybe_get_output
26751 // to retrieve the output
26752 }
26753 const Tensor& maybe_get_output(int64_t output_idx) override {
26754 return *outputs_[output_idx];
26755 }
26756 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
26757};
26758at::Tensor wrapper_CPU_slow_conv_transpose2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation) {
26759structured_slow_conv_transpose2d_structured_cpu_functional op;
26760op.meta(self, weight, kernel_size, ((bias.has_value() && (*bias).defined()) ? at::OptionalTensorRef(*bias) : at::OptionalTensorRef()), stride, padding, output_padding, dilation);
26761op.impl(self, weight, kernel_size, ((bias.has_value() && (*bias).defined()) ? at::OptionalTensorRef(*bias) : at::OptionalTensorRef()), stride, padding, output_padding, dilation, *op.outputs_[0]);
26762return std::move(op.outputs_[0]).take();
26763}
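// NOTE: The optional bias is forwarded to meta()/impl() as an
// at::OptionalTensorRef, and only when it is both present and defined(); an
// absent or undefined bias becomes an empty OptionalTensorRef. Unlike the
// wrappers above, the set_output overrides for this kernel do not chain to a
// parent set_output_raw_strided after the "super must happen after" comment;
// presumably the base meta class for this op has no implementation to call.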
26764struct structured_slow_conv_transpose2d_structured_cpu_out final : public at::native::structured_slow_conv_transpose2d_structured_cpu {
26765 structured_slow_conv_transpose2d_structured_cpu_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
26766 void set_output_strided(
26767 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26768 TensorOptions options, DimnameList names
26769 ) override {
26770 const auto& out = outputs_[output_idx].get();
26771 resize_out(out, sizes, strides, options);
26772 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
26773 if (C10_UNLIKELY(maybe_proxy.has_value())) {
26774 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
26775 }
26776 if (!names.empty()) {
26777 namedinference::propagate_names(outputs_[output_idx], names);
26778 }
26779 // super must happen after, so that downstream can use maybe_get_output
26780 // to retrieve the output
26781 }
26782 void set_output_raw_strided(
26783 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26784 TensorOptions options, DimnameList names
26785 ) override {
26786 const auto& out = outputs_[output_idx].get();
26787 resize_out(out, sizes, strides, options);
26788 if (!names.empty()) {
26789 namedinference::propagate_names(outputs_[output_idx], names);
26790 }
26791 // super must happen after, so that downstream can use maybe_get_output
26792 // to retrieve the output
26793 }
26794 const Tensor& maybe_get_output(int64_t output_idx) override {
26795 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
26796 }
26797 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
26798 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
26799};
26800at::Tensor & wrapper_CPU_slow_conv_transpose2d_out_out(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out) {
26801structured_slow_conv_transpose2d_structured_cpu_out op(out);
26802op.meta(self, weight, kernel_size, ((bias.has_value() && (*bias).defined()) ? at::OptionalTensorRef(*bias) : at::OptionalTensorRef()), stride, padding, output_padding, dilation);
26803op.impl(self, weight, kernel_size, ((bias.has_value() && (*bias).defined()) ? at::OptionalTensorRef(*bias) : at::OptionalTensorRef()), stride, padding, output_padding, dilation, op.maybe_get_output(0));
26804if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
26805return out;
26806}
26807namespace {
26808at::Tensor wrapper_CPU__slow_conv_transpose3d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef dilation) {
26809 // No device check
26810 // DeviceGuard omitted
26811 return at::native::slow_conv_transpose3d_cpu(self, weight, kernel_size, bias, stride, C10_AS_INTARRAYREF_SLOW(padding), C10_AS_INTARRAYREF_SLOW(output_padding), dilation);
26812}
26813} // anonymous namespace
26814namespace {
26815at::Tensor & wrapper_CPU_out_slow_conv_transpose3d_out(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out) {
26816 // No device check
26817 // DeviceGuard omitted
26818 return at::native::slow_conv_transpose3d_out_cpu(self, weight, kernel_size, bias, stride, C10_AS_INTARRAYREF_SLOW(padding), C10_AS_INTARRAYREF_SLOW(output_padding), dilation, out);
26819}
26820} // anonymous namespace
26821namespace {
26822at::Tensor wrapper_CPU___slow_conv2d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
26823 // No device check
26824 // DeviceGuard omitted
26825 return at::native::slow_conv2d_forward_cpu(self, weight, kernel_size, bias, stride, padding);
26826}
26827} // anonymous namespace
26828namespace {
26829at::Tensor & wrapper_CPU_output__slow_conv2d_forward_out(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & output) {
26830 // No device check
26831 // DeviceGuard omitted
26832 return at::native::slow_conv2d_forward_out_cpu(self, weight, kernel_size, bias, stride, padding, output);
26833}
26834} // anonymous namespace
26835namespace {
26836::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CPU_grad_input__slow_conv2d_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias) {
26837 // No device check
26838 // DeviceGuard omitted
26839 return at::native::slow_conv2d_backward_out_cpu(grad_output, self, weight, kernel_size, stride, padding, grad_input, grad_weight, grad_bias);
26840}
26841} // anonymous namespace
26842namespace {
26843::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU_output_mask__slow_conv2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array<bool,3> output_mask) {
26844 // No device check
26845 // DeviceGuard omitted
26846 return at::native::slow_conv2d_backward_cpu(grad_output, self, weight, kernel_size, stride, padding, output_mask);
26847}
26848} // anonymous namespace
26849namespace {
26850at::Tensor wrapper_CPU__slow_conv3d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding) {
26851 // No device check
26852 // DeviceGuard omitted
26853 return at::native::slow_conv3d_forward_cpu(self, weight, kernel_size, bias, stride, C10_AS_INTARRAYREF_SLOW(padding));
26854}
26855} // anonymous namespace
26856namespace {
26857at::Tensor & wrapper_CPU_output_slow_conv3d_forward_out(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & output) {
26858 // No device check
26859 // DeviceGuard omitted
26860 return at::native::slow_conv3d_forward_out_cpu(self, weight, kernel_size, bias, stride, C10_AS_INTARRAYREF_SLOW(padding), output);
26861}
26862} // anonymous namespace
26863namespace {
26864at::Tensor wrapper_CPU__slow_conv_dilated2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {
26865 // No device check
26866 // DeviceGuard omitted
26867 return at::native::slow_conv_dilated2d_cpu(self, weight, kernel_size, bias, stride, C10_AS_INTARRAYREF_SLOW(padding), dilation);
26868}
26869} // anonymous namespace
26870namespace {
26871at::Tensor wrapper_CPU__slow_conv_dilated3d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {
26872 // No device check
26873 // DeviceGuard omitted
26874 return at::native::slow_conv_dilated3d_cpu(self, weight, kernel_size, bias, stride, C10_AS_INTARRAYREF_SLOW(padding), dilation);
26875}
26876} // anonymous namespace
26877namespace {
26878at::Tensor wrapper_CPU__col2im(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
26879 // No device check
26880 // DeviceGuard omitted
26881 return at::native::col2im_cpu(self, C10_AS_INTARRAYREF_SLOW(output_size), kernel_size, dilation, padding, stride);
26882}
26883} // anonymous namespace
26884namespace {
26885at::Tensor & wrapper_CPU_out_col2im_out(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
26886 // No device check
26887 // DeviceGuard omitted
26888 return at::native::col2im_out_cpu(self, C10_AS_INTARRAYREF_SLOW(output_size), kernel_size, dilation, padding, stride, out);
26889}
26890} // anonymous namespace
26891namespace {
26892at::Tensor wrapper_CPU__im2col(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
26893 // No device check
26894 // DeviceGuard omitted
26895 return at::native::im2col_cpu(self, kernel_size, dilation, padding, stride);
26896}
26897} // anonymous namespace
26898namespace {
26899at::Tensor & wrapper_CPU_out_im2col_out(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
26900 // No device check
26901 // DeviceGuard omitted
26902 return at::native::im2col_out_cpu(self, kernel_size, dilation, padding, stride, out);
26903}
26904} // anonymous namespace
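// NOTE: The wrappers in the anonymous namespaces above are unstructured
// kernels: each one forwards directly to the corresponding at::native CPU
// implementation. SymInt array arguments are materialized to concrete
// IntArrayRef values via C10_AS_INTARRAYREF_SLOW, and, as their comments
// state, device checks and DeviceGuard setup are intentionally omitted.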
26905struct structured_isposinf_out_functional final : public at::native::structured_isposinf_out {
26906 void set_output_strided(
26907 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26908 TensorOptions options, DimnameList names
26909 ) override {
26910 outputs_[output_idx] = create_out(sizes, strides, options);
26911 if (!names.empty()) {
26912 namedinference::propagate_names(*outputs_[output_idx], names);
26913 }
26914 // super must happen after, so that downstream can use maybe_get_output
26915 // to retrieve the output
26916 at::native::structured_isposinf_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
26917 }
26918 void set_output_raw_strided(
26919 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26920 TensorOptions options, DimnameList names
26921 ) override {
26922 outputs_[output_idx] = create_out(sizes, strides, options);
26923 if (!names.empty()) {
26924 namedinference::propagate_names(*outputs_[output_idx], names);
26925 }
26926 // super must happen after, so that downstream can use maybe_get_output
26927 // to retrieve the output
26928 at::native::structured_isposinf_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
26929 }
26930 const Tensor& maybe_get_output(int64_t output_idx) override {
26931 return *outputs_[output_idx];
26932 }
26933 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
26934};
26935at::Tensor wrapper_CPU_isposinf(const at::Tensor & self) {
26936structured_isposinf_out_functional op;
26937op.meta(self);
26938op.impl(self, *op.outputs_[0]);
26939return std::move(op.outputs_[0]).take();
26940}
26941struct structured_isposinf_out_out final : public at::native::structured_isposinf_out {
26942 structured_isposinf_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
26943 void set_output_strided(
26944 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26945 TensorOptions options, DimnameList names
26946 ) override {
26947 const auto& out = outputs_[output_idx].get();
26948 resize_out(out, sizes, strides, options);
26949 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
26950 if (C10_UNLIKELY(maybe_proxy.has_value())) {
26951 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
26952 }
26953 if (!names.empty()) {
26954 namedinference::propagate_names(outputs_[output_idx], names);
26955 }
26956 // super must happen after, so that downstream can use maybe_get_output
26957 // to retrieve the output
26958 at::native::structured_isposinf_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
26959 }
26960 void set_output_raw_strided(
26961 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26962 TensorOptions options, DimnameList names
26963 ) override {
26964 const auto& out = outputs_[output_idx].get();
26965 resize_out(out, sizes, strides, options);
26966 if (!names.empty()) {
26967 namedinference::propagate_names(outputs_[output_idx], names);
26968 }
26969 // super must happen after, so that downstream can use maybe_get_output
26970 // to retrieve the output
26971 at::native::structured_isposinf_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
26972 }
26973 const Tensor& maybe_get_output(int64_t output_idx) override {
26974 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
26975 }
26976 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
26977 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
26978};
26979at::Tensor & wrapper_CPU_isposinf_out_out(const at::Tensor & self, at::Tensor & out) {
26980structured_isposinf_out_out op(out);
26981op.meta(self);
26982op.impl(self, op.maybe_get_output(0));
26983if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
26984return out;
26985}
26986struct structured_isneginf_out_functional final : public at::native::structured_isneginf_out {
26987 void set_output_strided(
26988 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
26989 TensorOptions options, DimnameList names
26990 ) override {
26991 outputs_[output_idx] = create_out(sizes, strides, options);
26992 if (!names.empty()) {
26993 namedinference::propagate_names(*outputs_[output_idx], names);
26994 }
26995 // super must happen after, so that downstream can use maybe_get_output
26996 // to retrieve the output
26997 at::native::structured_isneginf_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
26998 }
26999 void set_output_raw_strided(
27000 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27001 TensorOptions options, DimnameList names
27002 ) override {
27003 outputs_[output_idx] = create_out(sizes, strides, options);
27004 if (!names.empty()) {
27005 namedinference::propagate_names(*outputs_[output_idx], names);
27006 }
27007 // super must happen after, so that downstream can use maybe_get_output
27008 // to retrieve the output
27009 at::native::structured_isneginf_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27010 }
27011 const Tensor& maybe_get_output(int64_t output_idx) override {
27012 return *outputs_[output_idx];
27013 }
27014 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
27015};
27016at::Tensor wrapper_CPU_isneginf(const at::Tensor & self) {
27017structured_isneginf_out_functional op;
27018op.meta(self);
27019op.impl(self, *op.outputs_[0]);
27020return std::move(op.outputs_[0]).take();
27021}
27022struct structured_isneginf_out_out final : public at::native::structured_isneginf_out {
27023 structured_isneginf_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
27024 void set_output_strided(
27025 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27026 TensorOptions options, DimnameList names
27027 ) override {
27028 const auto& out = outputs_[output_idx].get();
27029 resize_out(out, sizes, strides, options);
27030 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
27031 if (C10_UNLIKELY(maybe_proxy.has_value())) {
27032 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
27033 }
27034 if (!names.empty()) {
27035 namedinference::propagate_names(outputs_[output_idx], names);
27036 }
27037 // super must happen after, so that downstream can use maybe_get_output
27038 // to retrieve the output
27039 at::native::structured_isneginf_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27040 }
27041 void set_output_raw_strided(
27042 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27043 TensorOptions options, DimnameList names
27044 ) override {
27045 const auto& out = outputs_[output_idx].get();
27046 resize_out(out, sizes, strides, options);
27047 if (!names.empty()) {
27048 namedinference::propagate_names(outputs_[output_idx], names);
27049 }
27050 // super must happen after, so that downstream can use maybe_get_output
27051 // to retrieve the output
27052 at::native::structured_isneginf_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27053 }
27054 const Tensor& maybe_get_output(int64_t output_idx) override {
27055 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
27056 }
27057 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
27058 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
27059};
27060at::Tensor & wrapper_CPU_isneginf_out_out(const at::Tensor & self, at::Tensor & out) {
27061structured_isneginf_out_out op(out);
27062op.meta(self);
27063op.impl(self, op.maybe_get_output(0));
27064if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
27065return out;
27066}
27067struct structured_special_entr_out_functional final : public at::native::structured_special_entr_out {
27068 void set_output_strided(
27069 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27070 TensorOptions options, DimnameList names
27071 ) override {
27072 outputs_[output_idx] = create_out(sizes, strides, options);
27073 if (!names.empty()) {
27074 namedinference::propagate_names(*outputs_[output_idx], names);
27075 }
27076 // super must happen after, so that downstream can use maybe_get_output
27077 // to retrieve the output
27078 at::native::structured_special_entr_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27079 }
27080 void set_output_raw_strided(
27081 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27082 TensorOptions options, DimnameList names
27083 ) override {
27084 outputs_[output_idx] = create_out(sizes, strides, options);
27085 if (!names.empty()) {
27086 namedinference::propagate_names(*outputs_[output_idx], names);
27087 }
27088 // super must happen after, so that downstream can use maybe_get_output
27089 // to retrieve the output
27090 at::native::structured_special_entr_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27091 }
27092 const Tensor& maybe_get_output(int64_t output_idx) override {
27093 return *outputs_[output_idx];
27094 }
27095 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
27096};
27097at::Tensor wrapper_CPU_special_entr(const at::Tensor & self) {
27098structured_special_entr_out_functional op;
27099op.meta(self);
27100op.impl(self, *op.outputs_[0]);
27101return std::move(op.outputs_[0]).take();
27102}
27103struct structured_special_entr_out_out final : public at::native::structured_special_entr_out {
27104 structured_special_entr_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
27105 void set_output_strided(
27106 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27107 TensorOptions options, DimnameList names
27108 ) override {
27109 const auto& out = outputs_[output_idx].get();
27110 resize_out(out, sizes, strides, options);
27111 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
27112 if (C10_UNLIKELY(maybe_proxy.has_value())) {
27113 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
27114 }
27115 if (!names.empty()) {
27116 namedinference::propagate_names(outputs_[output_idx], names);
27117 }
27118 // super must happen after, so that downstream can use maybe_get_output
27119 // to retrieve the output
27120 at::native::structured_special_entr_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27121 }
27122 void set_output_raw_strided(
27123 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27124 TensorOptions options, DimnameList names
27125 ) override {
27126 const auto& out = outputs_[output_idx].get();
27127 resize_out(out, sizes, strides, options);
27128 if (!names.empty()) {
27129 namedinference::propagate_names(outputs_[output_idx], names);
27130 }
27131 // super must happen after, so that downstream can use maybe_get_output
27132 // to retrieve the output
27133 at::native::structured_special_entr_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27134 }
27135 const Tensor& maybe_get_output(int64_t output_idx) override {
27136 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
27137 }
27138 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
27139 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
27140};
27141at::Tensor & wrapper_CPU_special_entr_out_out(const at::Tensor & self, at::Tensor & out) {
27142structured_special_entr_out_out op(out);
27143op.meta(self);
27144op.impl(self, op.maybe_get_output(0));
27145if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
27146return out;
27147}
27148struct structured_special_ndtri_out_functional final : public at::native::structured_special_ndtri_out {
27149 void set_output_strided(
27150 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27151 TensorOptions options, DimnameList names
27152 ) override {
27153 outputs_[output_idx] = create_out(sizes, strides, options);
27154 if (!names.empty()) {
27155 namedinference::propagate_names(*outputs_[output_idx], names);
27156 }
27157 // super must happen after, so that downstream can use maybe_get_output
27158 // to retrieve the output
27159 at::native::structured_special_ndtri_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27160 }
27161 void set_output_raw_strided(
27162 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27163 TensorOptions options, DimnameList names
27164 ) override {
27165 outputs_[output_idx] = create_out(sizes, strides, options);
27166 if (!names.empty()) {
27167 namedinference::propagate_names(*outputs_[output_idx], names);
27168 }
27169 // super must happen after, so that downstream can use maybe_get_output
27170 // to retrieve the output
27171 at::native::structured_special_ndtri_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27172 }
27173 const Tensor& maybe_get_output(int64_t output_idx) override {
27174 return *outputs_[output_idx];
27175 }
27176 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
27177};
27178at::Tensor wrapper_CPU_special_ndtri(const at::Tensor & self) {
27179structured_special_ndtri_out_functional op;
27180op.meta(self);
27181op.impl(self, *op.outputs_[0]);
27182return std::move(op.outputs_[0]).take();
27183}
27184struct structured_special_ndtri_out_out final : public at::native::structured_special_ndtri_out {
27185 structured_special_ndtri_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
27186 void set_output_strided(
27187 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27188 TensorOptions options, DimnameList names
27189 ) override {
27190 const auto& out = outputs_[output_idx].get();
27191 resize_out(out, sizes, strides, options);
27192 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
27193 if (C10_UNLIKELY(maybe_proxy.has_value())) {
27194 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
27195 }
27196 if (!names.empty()) {
27197 namedinference::propagate_names(outputs_[output_idx], names);
27198 }
27199 // super must happen after, so that downstream can use maybe_get_output
27200 // to retrieve the output
27201 at::native::structured_special_ndtri_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27202 }
27203 void set_output_raw_strided(
27204 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27205 TensorOptions options, DimnameList names
27206 ) override {
27207 const auto& out = outputs_[output_idx].get();
27208 resize_out(out, sizes, strides, options);
27209 if (!names.empty()) {
27210 namedinference::propagate_names(outputs_[output_idx], names);
27211 }
27212 // super must happen after, so that downstream can use maybe_get_output
27213 // to retrieve the output
27214 at::native::structured_special_ndtri_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27215 }
27216 const Tensor& maybe_get_output(int64_t output_idx) override {
27217 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
27218 }
27219 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
27220 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
27221};
27222at::Tensor & wrapper_CPU_special_ndtri_out_out(const at::Tensor & self, at::Tensor & out) {
27223structured_special_ndtri_out_out op(out);
27224op.meta(self);
27225op.impl(self, op.maybe_get_output(0));
27226if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
27227return out;
27228}
27229struct structured_special_log_ndtr_out_functional final : public at::native::structured_special_log_ndtr_out {
27230 void set_output_strided(
27231 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27232 TensorOptions options, DimnameList names
27233 ) override {
27234 outputs_[output_idx] = create_out(sizes, strides, options);
27235 if (!names.empty()) {
27236 namedinference::propagate_names(*outputs_[output_idx], names);
27237 }
27238 // super must happen after, so that downstream can use maybe_get_output
27239 // to retrieve the output
27240 at::native::structured_special_log_ndtr_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27241 }
27242 void set_output_raw_strided(
27243 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27244 TensorOptions options, DimnameList names
27245 ) override {
27246 outputs_[output_idx] = create_out(sizes, strides, options);
27247 if (!names.empty()) {
27248 namedinference::propagate_names(*outputs_[output_idx], names);
27249 }
27250 // super must happen after, so that downstream can use maybe_get_output
27251 // to retrieve the output
27252 at::native::structured_special_log_ndtr_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27253 }
27254 const Tensor& maybe_get_output(int64_t output_idx) override {
27255 return *outputs_[output_idx];
27256 }
27257 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
27258};
27259at::Tensor wrapper_CPU_special_log_ndtr(const at::Tensor & self) {
27260structured_special_log_ndtr_out_functional op;
27261op.meta(self);
27262op.impl(self, *op.outputs_[0]);
27263return std::move(op.outputs_[0]).take();
27264}
27265struct structured_special_log_ndtr_out_out final : public at::native::structured_special_log_ndtr_out {
27266 structured_special_log_ndtr_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
27267 void set_output_strided(
27268 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27269 TensorOptions options, DimnameList names
27270 ) override {
27271 const auto& out = outputs_[output_idx].get();
27272 resize_out(out, sizes, strides, options);
27273 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
27274 if (C10_UNLIKELY(maybe_proxy.has_value())) {
27275 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
27276 }
27277 if (!names.empty()) {
27278 namedinference::propagate_names(outputs_[output_idx], names);
27279 }
27280 // super must happen after, so that downstream can use maybe_get_output
27281 // to retrieve the output
27282 at::native::structured_special_log_ndtr_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27283 }
27284 void set_output_raw_strided(
27285 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27286 TensorOptions options, DimnameList names
27287 ) override {
27288 const auto& out = outputs_[output_idx].get();
27289 resize_out(out, sizes, strides, options);
27290 if (!names.empty()) {
27291 namedinference::propagate_names(outputs_[output_idx], names);
27292 }
27293 // super must happen after, so that downstream can use maybe_get_output
27294 // to retrieve the output
27295 at::native::structured_special_log_ndtr_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27296 }
27297 const Tensor& maybe_get_output(int64_t output_idx) override {
27298 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
27299 }
27300 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
27301 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
27302};
27303at::Tensor & wrapper_CPU_special_log_ndtr_out_out(const at::Tensor & self, at::Tensor & out) {
27304structured_special_log_ndtr_out_out op(out);
27305op.meta(self);
27306op.impl(self, op.maybe_get_output(0));
27307if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
27308return out;
27309}
27310struct structured_special_erfcx_out_functional final : public at::native::structured_special_erfcx_out {
27311 void set_output_strided(
27312 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27313 TensorOptions options, DimnameList names
27314 ) override {
27315 outputs_[output_idx] = create_out(sizes, strides, options);
27316 if (!names.empty()) {
27317 namedinference::propagate_names(*outputs_[output_idx], names);
27318 }
27319 // super must happen after, so that downstream can use maybe_get_output
27320 // to retrieve the output
27321 at::native::structured_special_erfcx_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27322 }
27323 void set_output_raw_strided(
27324 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27325 TensorOptions options, DimnameList names
27326 ) override {
27327 outputs_[output_idx] = create_out(sizes, strides, options);
27328 if (!names.empty()) {
27329 namedinference::propagate_names(*outputs_[output_idx], names);
27330 }
27331 // super must happen after, so that downstream can use maybe_get_output
27332 // to retrieve the output
27333 at::native::structured_special_erfcx_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27334 }
27335 const Tensor& maybe_get_output(int64_t output_idx) override {
27336 return *outputs_[output_idx];
27337 }
27338 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
27339};
27340at::Tensor wrapper_CPU_special_erfcx(const at::Tensor & self) {
27341structured_special_erfcx_out_functional op;
27342op.meta(self);
27343op.impl(self, *op.outputs_[0]);
27344return std::move(op.outputs_[0]).take();
27345}
27346struct structured_special_erfcx_out_out final : public at::native::structured_special_erfcx_out {
27347 structured_special_erfcx_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
27348 void set_output_strided(
27349 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27350 TensorOptions options, DimnameList names
27351 ) override {
27352 const auto& out = outputs_[output_idx].get();
27353 resize_out(out, sizes, strides, options);
27354 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
27355 if (C10_UNLIKELY(maybe_proxy.has_value())) {
27356 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
27357 }
27358 if (!names.empty()) {
27359 namedinference::propagate_names(outputs_[output_idx], names);
27360 }
27361 // super must happen after, so that downstream can use maybe_get_output
27362 // to retrieve the output
27363 at::native::structured_special_erfcx_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27364 }
27365 void set_output_raw_strided(
27366 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27367 TensorOptions options, DimnameList names
27368 ) override {
27369 const auto& out = outputs_[output_idx].get();
27370 resize_out(out, sizes, strides, options);
27371 if (!names.empty()) {
27372 namedinference::propagate_names(outputs_[output_idx], names);
27373 }
27374 // super must happen after, so that downstream can use maybe_get_output
27375 // to retrieve the output
27376 at::native::structured_special_erfcx_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27377 }
27378 const Tensor& maybe_get_output(int64_t output_idx) override {
27379 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
27380 }
27381 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
27382 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
27383};
27384at::Tensor & wrapper_CPU_special_erfcx_out_out(const at::Tensor & self, at::Tensor & out) {
27385structured_special_erfcx_out_out op(out);
27386op.meta(self);
27387op.impl(self, op.maybe_get_output(0));
27388if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
27389return out;
27390}
27391struct structured_special_xlog1py_out_functional final : public at::native::structured_special_xlog1py_out {
27392 void set_output_strided(
27393 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27394 TensorOptions options, DimnameList names
27395 ) override {
27396 outputs_[output_idx] = create_out(sizes, strides, options);
27397 if (!names.empty()) {
27398 namedinference::propagate_names(*outputs_[output_idx], names);
27399 }
27400 // super must happen after, so that downstream can use maybe_get_output
27401 // to retrieve the output
27402 at::native::structured_special_xlog1py_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27403 }
27404 void set_output_raw_strided(
27405 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27406 TensorOptions options, DimnameList names
27407 ) override {
27408 outputs_[output_idx] = create_out(sizes, strides, options);
27409 if (!names.empty()) {
27410 namedinference::propagate_names(*outputs_[output_idx], names);
27411 }
27412 // super must happen after, so that downstream can use maybe_get_output
27413 // to retrieve the output
27414 at::native::structured_special_xlog1py_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27415 }
27416 const Tensor& maybe_get_output(int64_t output_idx) override {
27417 return *outputs_[output_idx];
27418 }
27419 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
27420};
27421at::Tensor wrapper_CPU_special_xlog1py(const at::Tensor & self, const at::Tensor & other) {
27422structured_special_xlog1py_out_functional op;
27423op.meta(self, other);
27424op.impl(self, other, *op.outputs_[0]);
27425return std::move(op.outputs_[0]).take();
27426}
27427struct structured_special_xlog1py_out_out final : public at::native::structured_special_xlog1py_out {
27428 structured_special_xlog1py_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
27429 void set_output_strided(
27430 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27431 TensorOptions options, DimnameList names
27432 ) override {
27433 const auto& out = outputs_[output_idx].get();
27434 resize_out(out, sizes, strides, options);
27435 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
27436 if (C10_UNLIKELY(maybe_proxy.has_value())) {
27437 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
27438 }
27439 if (!names.empty()) {
27440 namedinference::propagate_names(outputs_[output_idx], names);
27441 }
27442 // super must happen after, so that downstream can use maybe_get_output
27443 // to retrieve the output
27444 at::native::structured_special_xlog1py_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27445 }
27446 void set_output_raw_strided(
27447 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27448 TensorOptions options, DimnameList names
27449 ) override {
27450 const auto& out = outputs_[output_idx].get();
27451 resize_out(out, sizes, strides, options);
27452 if (!names.empty()) {
27453 namedinference::propagate_names(outputs_[output_idx], names);
27454 }
27455 // super must happen after, so that downstream can use maybe_get_output
27456 // to retrieve the output
27457 at::native::structured_special_xlog1py_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27458 }
27459 const Tensor& maybe_get_output(int64_t output_idx) override {
27460 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
27461 }
27462 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
27463 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
27464};
27465at::Tensor & wrapper_CPU_special_xlog1py_out_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
27466structured_special_xlog1py_out_out op(out);
27467op.meta(self, other);
27468op.impl(self, other, op.maybe_get_output(0));
27469if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
27470return out;
27471}
27472struct structured_special_zeta_out_functional final : public at::native::structured_special_zeta_out {
27473 void set_output_strided(
27474 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27475 TensorOptions options, DimnameList names
27476 ) override {
27477 outputs_[output_idx] = create_out(sizes, strides, options);
27478 if (!names.empty()) {
27479 namedinference::propagate_names(*outputs_[output_idx], names);
27480 }
27481 // super must happen after, so that downstream can use maybe_get_output
27482 // to retrieve the output
27483 at::native::structured_special_zeta_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27484 }
27485 void set_output_raw_strided(
27486 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27487 TensorOptions options, DimnameList names
27488 ) override {
27489 outputs_[output_idx] = create_out(sizes, strides, options);
27490 if (!names.empty()) {
27491 namedinference::propagate_names(*outputs_[output_idx], names);
27492 }
27493 // super must happen after, so that downstream can use maybe_get_output
27494 // to retrieve the output
27495 at::native::structured_special_zeta_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27496 }
27497 const Tensor& maybe_get_output(int64_t output_idx) override {
27498 return *outputs_[output_idx];
27499 }
27500 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
27501};
27502at::Tensor wrapper_CPU_special_zeta(const at::Tensor & self, const at::Tensor & other) {
27503structured_special_zeta_out_functional op;
27504op.meta(self, other);
27505op.impl(self, other, *op.outputs_[0]);
27506return std::move(op.outputs_[0]).take();
27507}
27508struct structured_special_zeta_out_out final : public at::native::structured_special_zeta_out {
27509 structured_special_zeta_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
27510 void set_output_strided(
27511 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27512 TensorOptions options, DimnameList names
27513 ) override {
27514 const auto& out = outputs_[output_idx].get();
27515 resize_out(out, sizes, strides, options);
27516 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
27517 if (C10_UNLIKELY(maybe_proxy.has_value())) {
27518 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
27519 }
27520 if (!names.empty()) {
27521 namedinference::propagate_names(outputs_[output_idx], names);
27522 }
27523 // super must happen after, so that downstream can use maybe_get_output
27524 // to retrieve the output
27525 at::native::structured_special_zeta_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27526 }
27527 void set_output_raw_strided(
27528 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27529 TensorOptions options, DimnameList names
27530 ) override {
27531 const auto& out = outputs_[output_idx].get();
27532 resize_out(out, sizes, strides, options);
27533 if (!names.empty()) {
27534 namedinference::propagate_names(outputs_[output_idx], names);
27535 }
27536 // super must happen after, so that downstream can use maybe_get_output
27537 // to retrieve the output
27538 at::native::structured_special_zeta_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27539 }
27540 const Tensor& maybe_get_output(int64_t output_idx) override {
27541 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
27542 }
27543 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
27544 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
27545};
27546at::Tensor & wrapper_CPU_special_zeta_out_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
27547structured_special_zeta_out_out op(out);
27548op.meta(self, other);
27549op.impl(self, other, op.maybe_get_output(0));
27550if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
27551return out;
27552}
27553struct structured_special_i0e_out_functional final : public at::native::structured_special_i0e_out {
27554 void set_output_strided(
27555 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27556 TensorOptions options, DimnameList names
27557 ) override {
27558 outputs_[output_idx] = create_out(sizes, strides, options);
27559 if (!names.empty()) {
27560 namedinference::propagate_names(*outputs_[output_idx], names);
27561 }
27562 // super must happen after, so that downstream can use maybe_get_output
27563 // to retrieve the output
27564 at::native::structured_special_i0e_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27565 }
27566 void set_output_raw_strided(
27567 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27568 TensorOptions options, DimnameList names
27569 ) override {
27570 outputs_[output_idx] = create_out(sizes, strides, options);
27571 if (!names.empty()) {
27572 namedinference::propagate_names(*outputs_[output_idx], names);
27573 }
27574 // super must happen after, so that downstream can use maybe_get_output
27575 // to retrieve the output
27576 at::native::structured_special_i0e_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27577 }
27578 const Tensor& maybe_get_output(int64_t output_idx) override {
27579 return *outputs_[output_idx];
27580 }
27581 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
27582};
27583at::Tensor wrapper_CPU_special_i0e(const at::Tensor & self) {
27584structured_special_i0e_out_functional op;
27585op.meta(self);
27586op.impl(self, *op.outputs_[0]);
27587return std::move(op.outputs_[0]).take();
27588}
27589struct structured_special_i0e_out_out final : public at::native::structured_special_i0e_out {
27590 structured_special_i0e_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
27591 void set_output_strided(
27592 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27593 TensorOptions options, DimnameList names
27594 ) override {
27595 const auto& out = outputs_[output_idx].get();
27596 resize_out(out, sizes, strides, options);
27597 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
27598 if (C10_UNLIKELY(maybe_proxy.has_value())) {
27599 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
27600 }
27601 if (!names.empty()) {
27602 namedinference::propagate_names(outputs_[output_idx], names);
27603 }
27604 // super must happen after, so that downstream can use maybe_get_output
27605 // to retrieve the output
27606 at::native::structured_special_i0e_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27607 }
27608 void set_output_raw_strided(
27609 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27610 TensorOptions options, DimnameList names
27611 ) override {
27612 const auto& out = outputs_[output_idx].get();
27613 resize_out(out, sizes, strides, options);
27614 if (!names.empty()) {
27615 namedinference::propagate_names(outputs_[output_idx], names);
27616 }
27617 // super must happen after, so that downstream can use maybe_get_output
27618 // to retrieve the output
27619 at::native::structured_special_i0e_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27620 }
27621 const Tensor& maybe_get_output(int64_t output_idx) override {
27622 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
27623 }
27624 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
27625 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
27626};
27627at::Tensor & wrapper_CPU_special_i0e_out_out(const at::Tensor & self, at::Tensor & out) {
27628structured_special_i0e_out_out op(out);
27629op.meta(self);
27630op.impl(self, op.maybe_get_output(0));
27631if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
27632return out;
27633}
27634struct structured_special_i1_out_functional final : public at::native::structured_special_i1_out {
27635 void set_output_strided(
27636 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27637 TensorOptions options, DimnameList names
27638 ) override {
27639 outputs_[output_idx] = create_out(sizes, strides, options);
27640 if (!names.empty()) {
27641 namedinference::propagate_names(*outputs_[output_idx], names);
27642 }
27643 // super must happen after, so that downstream can use maybe_get_output
27644 // to retrieve the output
27645 at::native::structured_special_i1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27646 }
27647 void set_output_raw_strided(
27648 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27649 TensorOptions options, DimnameList names
27650 ) override {
27651 outputs_[output_idx] = create_out(sizes, strides, options);
27652 if (!names.empty()) {
27653 namedinference::propagate_names(*outputs_[output_idx], names);
27654 }
27655 // super must happen after, so that downstream can use maybe_get_output
27656 // to retrieve the output
27657 at::native::structured_special_i1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27658 }
27659 const Tensor& maybe_get_output(int64_t output_idx) override {
27660 return *outputs_[output_idx];
27661 }
27662 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
27663};
27664at::Tensor wrapper_CPU_special_i1(const at::Tensor & self) {
27665structured_special_i1_out_functional op;
27666op.meta(self);
27667op.impl(self, *op.outputs_[0]);
27668return std::move(op.outputs_[0]).take();
27669}
27670struct structured_special_i1_out_out final : public at::native::structured_special_i1_out {
27671 structured_special_i1_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
27672 void set_output_strided(
27673 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27674 TensorOptions options, DimnameList names
27675 ) override {
27676 const auto& out = outputs_[output_idx].get();
27677 resize_out(out, sizes, strides, options);
27678 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
27679 if (C10_UNLIKELY(maybe_proxy.has_value())) {
27680 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
27681 }
27682 if (!names.empty()) {
27683 namedinference::propagate_names(outputs_[output_idx], names);
27684 }
27685 // super must happen after, so that downstream can use maybe_get_output
27686 // to retrieve the output
27687 at::native::structured_special_i1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27688 }
27689 void set_output_raw_strided(
27690 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27691 TensorOptions options, DimnameList names
27692 ) override {
27693 const auto& out = outputs_[output_idx].get();
27694 resize_out(out, sizes, strides, options);
27695 if (!names.empty()) {
27696 namedinference::propagate_names(outputs_[output_idx], names);
27697 }
27698 // super must happen after, so that downstream can use maybe_get_output
27699 // to retrieve the output
27700 at::native::structured_special_i1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27701 }
27702 const Tensor& maybe_get_output(int64_t output_idx) override {
27703 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
27704 }
27705 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
27706 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
27707};
27708at::Tensor & wrapper_CPU_special_i1_out_out(const at::Tensor & self, at::Tensor & out) {
27709structured_special_i1_out_out op(out);
27710op.meta(self);
27711op.impl(self, op.maybe_get_output(0));
27712if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
27713return out;
27714}
27715struct structured_special_i1e_out_functional final : public at::native::structured_special_i1e_out {
27716 void set_output_strided(
27717 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27718 TensorOptions options, DimnameList names
27719 ) override {
27720 outputs_[output_idx] = create_out(sizes, strides, options);
27721 if (!names.empty()) {
27722 namedinference::propagate_names(*outputs_[output_idx], names);
27723 }
27724 // super must happen after, so that downstream can use maybe_get_output
27725 // to retrieve the output
27726 at::native::structured_special_i1e_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27727 }
27728 void set_output_raw_strided(
27729 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27730 TensorOptions options, DimnameList names
27731 ) override {
27732 outputs_[output_idx] = create_out(sizes, strides, options);
27733 if (!names.empty()) {
27734 namedinference::propagate_names(*outputs_[output_idx], names);
27735 }
27736 // super must happen after, so that downstream can use maybe_get_output
27737 // to retrieve the output
27738 at::native::structured_special_i1e_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27739 }
27740 const Tensor& maybe_get_output(int64_t output_idx) override {
27741 return *outputs_[output_idx];
27742 }
27743 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
27744};
27745at::Tensor wrapper_CPU_special_i1e(const at::Tensor & self) {
27746structured_special_i1e_out_functional op;
27747op.meta(self);
27748op.impl(self, *op.outputs_[0]);
27749return std::move(op.outputs_[0]).take();
27750}
27751struct structured_special_i1e_out_out final : public at::native::structured_special_i1e_out {
27752 structured_special_i1e_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
27753 void set_output_strided(
27754 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27755 TensorOptions options, DimnameList names
27756 ) override {
27757 const auto& out = outputs_[output_idx].get();
27758 resize_out(out, sizes, strides, options);
27759 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
27760 if (C10_UNLIKELY(maybe_proxy.has_value())) {
27761 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
27762 }
27763 if (!names.empty()) {
27764 namedinference::propagate_names(outputs_[output_idx], names);
27765 }
27766 // super must happen after, so that downstream can use maybe_get_output
27767 // to retrieve the output
27768 at::native::structured_special_i1e_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27769 }
27770 void set_output_raw_strided(
27771 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27772 TensorOptions options, DimnameList names
27773 ) override {
27774 const auto& out = outputs_[output_idx].get();
27775 resize_out(out, sizes, strides, options);
27776 if (!names.empty()) {
27777 namedinference::propagate_names(outputs_[output_idx], names);
27778 }
27779 // super must happen after, so that downstream can use maybe_get_output
27780 // to retrieve the output
27781 at::native::structured_special_i1e_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
27782 }
27783 const Tensor& maybe_get_output(int64_t output_idx) override {
27784 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
27785 }
27786 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
27787 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
27788};
27789at::Tensor & wrapper_CPU_special_i1e_out_out(const at::Tensor & self, at::Tensor & out) {
27790structured_special_i1e_out_out op(out);
27791op.meta(self);
27792op.impl(self, op.maybe_get_output(0));
27793if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
27794return out;
27795}
27796struct structured_linalg_cholesky_ex_out_functional final : public at::native::structured_linalg_cholesky_ex_out {
27797 void set_output_strided(
27798 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27799 TensorOptions options, DimnameList names
27800 ) override {
27801 outputs_[output_idx] = create_out(sizes, strides, options);
27802 if (!names.empty()) {
27803 namedinference::propagate_names(*outputs_[output_idx], names);
27804 }
27805 // super must happen after, so that downstream can use maybe_get_output
27806 // to retrieve the output
27807 }
27808 void set_output_raw_strided(
27809 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27810 TensorOptions options, DimnameList names
27811 ) override {
27812 outputs_[output_idx] = create_out(sizes, strides, options);
27813 if (!names.empty()) {
27814 namedinference::propagate_names(*outputs_[output_idx], names);
27815 }
27816 // super must happen after, so that downstream can use maybe_get_output
27817 // to retrieve the output
27818 }
27819 const Tensor& maybe_get_output(int64_t output_idx) override {
27820 return *outputs_[output_idx];
27821 }
27822 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
27823};
27824::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_linalg_cholesky_ex(const at::Tensor & self, bool upper, bool check_errors) {
27825structured_linalg_cholesky_ex_out_functional op;
27826op.meta(self, upper, check_errors);
27827op.impl(self, upper, check_errors, *op.outputs_[0], *op.outputs_[1]);
27828return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
27829}
27830struct structured_linalg_cholesky_ex_out_out final : public at::native::structured_linalg_cholesky_ex_out {
27831 structured_linalg_cholesky_ex_out_out(Tensor& out0, Tensor& out1) : outputs_{ std::ref(out0), std::ref(out1) } {}
27832 void set_output_strided(
27833 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27834 TensorOptions options, DimnameList names
27835 ) override {
27836 const auto& out = outputs_[output_idx].get();
27837 resize_out(out, sizes, strides, options);
27838 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
27839 if (C10_UNLIKELY(maybe_proxy.has_value())) {
27840 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
27841 }
27842 if (!names.empty()) {
27843 namedinference::propagate_names(outputs_[output_idx], names);
27844 }
27845 // super must happen after, so that downstream can use maybe_get_output
27846 // to retrieve the output
27847 }
27848 void set_output_raw_strided(
27849 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27850 TensorOptions options, DimnameList names
27851 ) override {
27852 const auto& out = outputs_[output_idx].get();
27853 resize_out(out, sizes, strides, options);
27854 if (!names.empty()) {
27855 namedinference::propagate_names(outputs_[output_idx], names);
27856 }
27857 // super must happen after, so that downstream can use maybe_get_output
27858 // to retrieve the output
27859 }
27860 const Tensor& maybe_get_output(int64_t output_idx) override {
27861 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
27862 }
27863 std::array<std::reference_wrapper<Tensor>, 2> outputs_;
27864 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 2> proxy_outputs_;
27865};
27866::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_linalg_cholesky_ex_out_L(const at::Tensor & self, bool upper, bool check_errors, at::Tensor & L, at::Tensor & info) {
27867structured_linalg_cholesky_ex_out_out op(L, info);
27868op.meta(self, upper, check_errors);
27869op.impl(self, upper, check_errors, op.maybe_get_output(0), op.maybe_get_output(1));
27870if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
27871if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(**op.proxy_outputs_[1]);
27872return std::forward_as_tuple(L, info);
27873}
27874struct structured_linalg_cross_out_functional final : public at::native::structured_linalg_cross_out {
27875 void set_output_strided(
27876 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27877 TensorOptions options, DimnameList names
27878 ) override {
27879 outputs_[output_idx] = create_out(sizes, strides, options);
27880 if (!names.empty()) {
27881 namedinference::propagate_names(*outputs_[output_idx], names);
27882 }
27883 // super must happen after, so that downstream can use maybe_get_output
27884 // to retrieve the output
27885 }
27886 void set_output_raw_strided(
27887 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27888 TensorOptions options, DimnameList names
27889 ) override {
27890 outputs_[output_idx] = create_out(sizes, strides, options);
27891 if (!names.empty()) {
27892 namedinference::propagate_names(*outputs_[output_idx], names);
27893 }
27894 // super must happen after, so that downstream can use maybe_get_output
27895 // to retrieve the output
27896 }
27897 const Tensor& maybe_get_output(int64_t output_idx) override {
27898 return *outputs_[output_idx];
27899 }
27900 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
27901};
27902at::Tensor wrapper_CPU_linalg_cross(const at::Tensor & self, const at::Tensor & other, int64_t dim) {
27903structured_linalg_cross_out_functional op;
27904op.meta(self, other, dim);
27905op.impl(self, other, dim, *op.outputs_[0]);
27906return std::move(op.outputs_[0]).take();
27907}
27908struct structured_linalg_cross_out_out final : public at::native::structured_linalg_cross_out {
27909 structured_linalg_cross_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
27910 void set_output_strided(
27911 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27912 TensorOptions options, DimnameList names
27913 ) override {
27914 const auto& out = outputs_[output_idx].get();
27915 resize_out(out, sizes, strides, options);
27916 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
27917 if (C10_UNLIKELY(maybe_proxy.has_value())) {
27918 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
27919 }
27920 if (!names.empty()) {
27921 namedinference::propagate_names(outputs_[output_idx], names);
27922 }
27923 // super must happen after, so that downstream can use maybe_get_output
27924 // to retrieve the output
27925 }
27926 void set_output_raw_strided(
27927 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27928 TensorOptions options, DimnameList names
27929 ) override {
27930 const auto& out = outputs_[output_idx].get();
27931 resize_out(out, sizes, strides, options);
27932 if (!names.empty()) {
27933 namedinference::propagate_names(outputs_[output_idx], names);
27934 }
27935 // super must happen after, so that downstream can use maybe_get_output
27936 // to retrieve the output
27937 }
27938 const Tensor& maybe_get_output(int64_t output_idx) override {
27939 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
27940 }
27941 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
27942 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
27943};
27944at::Tensor & wrapper_CPU_linalg_cross_out_out(const at::Tensor & self, const at::Tensor & other, int64_t dim, at::Tensor & out) {
27945structured_linalg_cross_out_out op(out);
27946op.meta(self, other, dim);
27947op.impl(self, other, dim, op.maybe_get_output(0));
27948if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
27949return out;
27950}
27951struct structured_linalg_lu_factor_ex_out_functional final : public at::native::structured_linalg_lu_factor_ex_out {
27952 void set_output_strided(
27953 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27954 TensorOptions options, DimnameList names
27955 ) override {
27956 outputs_[output_idx] = create_out(sizes, strides, options);
27957 if (!names.empty()) {
27958 namedinference::propagate_names(*outputs_[output_idx], names);
27959 }
27960 // super must happen after, so that downstream can use maybe_get_output
27961 // to retrieve the output
27962 }
27963 void set_output_raw_strided(
27964 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27965 TensorOptions options, DimnameList names
27966 ) override {
27967 outputs_[output_idx] = create_out(sizes, strides, options);
27968 if (!names.empty()) {
27969 namedinference::propagate_names(*outputs_[output_idx], names);
27970 }
27971 // super must happen after, so that downstream can use maybe_get_output
27972 // to retrieve the output
27973 }
27974 const Tensor& maybe_get_output(int64_t output_idx) override {
27975 return *outputs_[output_idx];
27976 }
27977 std::array<c10::ExclusivelyOwned<Tensor>, 3> outputs_;
27978};
27979::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU_linalg_lu_factor_ex(const at::Tensor & A, bool pivot, bool check_errors) {
27980structured_linalg_lu_factor_ex_out_functional op;
27981op.meta(A, pivot, check_errors);
27982op.impl(A, pivot, check_errors, *op.outputs_[0], *op.outputs_[1], *op.outputs_[2]);
27983return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take(), std::move(op.outputs_[2]).take());
27984}
27985struct structured_linalg_lu_factor_ex_out_out final : public at::native::structured_linalg_lu_factor_ex_out {
27986 structured_linalg_lu_factor_ex_out_out(Tensor& out0, Tensor& out1, Tensor& out2) : outputs_{ std::ref(out0), std::ref(out1), std::ref(out2) } {}
27987 void set_output_strided(
27988 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
27989 TensorOptions options, DimnameList names
27990 ) override {
27991 const auto& out = outputs_[output_idx].get();
27992 resize_out(out, sizes, strides, options);
27993 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
27994 if (C10_UNLIKELY(maybe_proxy.has_value())) {
27995 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
27996 }
27997 if (!names.empty()) {
27998 namedinference::propagate_names(outputs_[output_idx], names);
27999 }
28000 // super must happen after, so that downstream can use maybe_get_output
28001 // to retrieve the output
28002 }
28003 void set_output_raw_strided(
28004 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28005 TensorOptions options, DimnameList names
28006 ) override {
28007 const auto& out = outputs_[output_idx].get();
28008 resize_out(out, sizes, strides, options);
28009 if (!names.empty()) {
28010 namedinference::propagate_names(outputs_[output_idx], names);
28011 }
28012 // super must happen after, so that downstream can use maybe_get_output
28013 // to retrieve the output
28014 }
28015 const Tensor& maybe_get_output(int64_t output_idx) override {
28016 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
28017 }
28018 std::array<std::reference_wrapper<Tensor>, 3> outputs_;
28019 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 3> proxy_outputs_;
28020};
28021::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CPU_linalg_lu_factor_ex_out_out(const at::Tensor & A, bool pivot, bool check_errors, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info) {
28022structured_linalg_lu_factor_ex_out_out op(LU, pivots, info);
28023op.meta(A, pivot, check_errors);
28024op.impl(A, pivot, check_errors, op.maybe_get_output(0), op.maybe_get_output(1), op.maybe_get_output(2));
28025if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
28026if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(**op.proxy_outputs_[1]);
28027if (op.proxy_outputs_[2].has_value()) op.outputs_[2].get().copy_(**op.proxy_outputs_[2]);
28028return std::forward_as_tuple(LU, pivots, info);
28029}
28030struct structured_linalg_lu_out_functional final : public at::native::structured_linalg_lu_out {
28031 void set_output_strided(
28032 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28033 TensorOptions options, DimnameList names
28034 ) override {
28035 outputs_[output_idx] = create_out(sizes, strides, options);
28036 if (!names.empty()) {
28037 namedinference::propagate_names(*outputs_[output_idx], names);
28038 }
28039 // super must happen after, so that downstream can use maybe_get_output
28040 // to retrieve the output
28041 }
28042 void set_output_raw_strided(
28043 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28044 TensorOptions options, DimnameList names
28045 ) override {
28046 outputs_[output_idx] = create_out(sizes, strides, options);
28047 if (!names.empty()) {
28048 namedinference::propagate_names(*outputs_[output_idx], names);
28049 }
28050 // super must happen after, so that downstream can use maybe_get_output
28051 // to retrieve the output
28052 }
28053 const Tensor& maybe_get_output(int64_t output_idx) override {
28054 return *outputs_[output_idx];
28055 }
28056 std::array<c10::ExclusivelyOwned<Tensor>, 3> outputs_;
28057};
28058::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU_linalg_lu(const at::Tensor & A, bool pivot) {
28059structured_linalg_lu_out_functional op;
28060op.meta(A, pivot);
28061op.impl(A, pivot, *op.outputs_[0], *op.outputs_[1], *op.outputs_[2]);
28062return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take(), std::move(op.outputs_[2]).take());
28063}
28064struct structured_linalg_lu_out_out final : public at::native::structured_linalg_lu_out {
28065 structured_linalg_lu_out_out(Tensor& out0, Tensor& out1, Tensor& out2) : outputs_{ std::ref(out0), std::ref(out1), std::ref(out2) } {}
28066 void set_output_strided(
28067 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28068 TensorOptions options, DimnameList names
28069 ) override {
28070 const auto& out = outputs_[output_idx].get();
28071 resize_out(out, sizes, strides, options);
28072 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
28073 if (C10_UNLIKELY(maybe_proxy.has_value())) {
28074 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
28075 }
28076 if (!names.empty()) {
28077 namedinference::propagate_names(outputs_[output_idx], names);
28078 }
28079 // super must happen after, so that downstream can use maybe_get_output
28080 // to retrieve the output
28081 }
28082 void set_output_raw_strided(
28083 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28084 TensorOptions options, DimnameList names
28085 ) override {
28086 const auto& out = outputs_[output_idx].get();
28087 resize_out(out, sizes, strides, options);
28088 if (!names.empty()) {
28089 namedinference::propagate_names(outputs_[output_idx], names);
28090 }
28091 // super must happen after, so that downstream can use maybe_get_output
28092 // to retrieve the output
28093 }
28094 const Tensor& maybe_get_output(int64_t output_idx) override {
28095 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
28096 }
28097 std::array<std::reference_wrapper<Tensor>, 3> outputs_;
28098 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 3> proxy_outputs_;
28099};
28100::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CPU_linalg_lu_out_out(const at::Tensor & A, bool pivot, at::Tensor & P, at::Tensor & L, at::Tensor & U) {
28101structured_linalg_lu_out_out op(P, L, U);
28102op.meta(A, pivot);
28103op.impl(A, pivot, op.maybe_get_output(0), op.maybe_get_output(1), op.maybe_get_output(2));
28104if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
28105if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(**op.proxy_outputs_[1]);
28106if (op.proxy_outputs_[2].has_value()) op.outputs_[2].get().copy_(**op.proxy_outputs_[2]);
28107return std::forward_as_tuple(P, L, U);
28108}
28109struct structured_linalg_lu_solve_out_functional final : public at::native::structured_linalg_lu_solve_out {
28110 void set_output_strided(
28111 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28112 TensorOptions options, DimnameList names
28113 ) override {
28114 outputs_[output_idx] = create_out(sizes, strides, options);
28115 if (!names.empty()) {
28116 namedinference::propagate_names(*outputs_[output_idx], names);
28117 }
28118 // super must happen after, so that downstream can use maybe_get_output
28119 // to retrieve the output
28120 }
28121 void set_output_raw_strided(
28122 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28123 TensorOptions options, DimnameList names
28124 ) override {
28125 outputs_[output_idx] = create_out(sizes, strides, options);
28126 if (!names.empty()) {
28127 namedinference::propagate_names(*outputs_[output_idx], names);
28128 }
28129 // super must happen after, so that downstream can use maybe_get_output
28130 // to retrieve the output
28131 }
28132 const Tensor& maybe_get_output(int64_t output_idx) override {
28133 return *outputs_[output_idx];
28134 }
28135 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
28136};
28137at::Tensor wrapper_CPU_linalg_lu_solve(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint) {
28138structured_linalg_lu_solve_out_functional op;
28139op.meta(LU, pivots, B, left, adjoint);
28140op.impl(LU, pivots, B, left, adjoint, *op.outputs_[0]);
28141return std::move(op.outputs_[0]).take();
28142}
28143struct structured_linalg_lu_solve_out_out final : public at::native::structured_linalg_lu_solve_out {
28144 structured_linalg_lu_solve_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
28145 void set_output_strided(
28146 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28147 TensorOptions options, DimnameList names
28148 ) override {
28149 const auto& out = outputs_[output_idx].get();
28150 resize_out(out, sizes, strides, options);
28151 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
28152 if (C10_UNLIKELY(maybe_proxy.has_value())) {
28153 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
28154 }
28155 if (!names.empty()) {
28156 namedinference::propagate_names(outputs_[output_idx], names);
28157 }
28158 // super must happen after, so that downstream can use maybe_get_output
28159 // to retrieve the output
28160 }
28161 void set_output_raw_strided(
28162 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28163 TensorOptions options, DimnameList names
28164 ) override {
28165 const auto& out = outputs_[output_idx].get();
28166 resize_out(out, sizes, strides, options);
28167 if (!names.empty()) {
28168 namedinference::propagate_names(outputs_[output_idx], names);
28169 }
28170 // super must happen after, so that downstream can use maybe_get_output
28171 // to retrieve the output
28172 }
28173 const Tensor& maybe_get_output(int64_t output_idx) override {
28174 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
28175 }
28176 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
28177 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
28178};
28179at::Tensor & wrapper_CPU_linalg_lu_solve_out_out(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint, at::Tensor & out) {
28180structured_linalg_lu_solve_out_out op(out);
28181op.meta(LU, pivots, B, left, adjoint);
28182op.impl(LU, pivots, B, left, adjoint, op.maybe_get_output(0));
28183if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
28184return out;
28185}
28186struct structured__linalg_det_out_functional final : public at::native::structured__linalg_det_out {
28187 void set_output_strided(
28188 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28189 TensorOptions options, DimnameList names
28190 ) override {
28191 outputs_[output_idx] = create_out(sizes, strides, options);
28192 if (!names.empty()) {
28193 namedinference::propagate_names(*outputs_[output_idx], names);
28194 }
28195 // super must happen after, so that downstream can use maybe_get_output
28196 // to retrieve the output
28197 }
28198 void set_output_raw_strided(
28199 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28200 TensorOptions options, DimnameList names
28201 ) override {
28202 outputs_[output_idx] = create_out(sizes, strides, options);
28203 if (!names.empty()) {
28204 namedinference::propagate_names(*outputs_[output_idx], names);
28205 }
28206 // super must happen after, so that downstream can use maybe_get_output
28207 // to retrieve the output
28208 }
28209 const Tensor& maybe_get_output(int64_t output_idx) override {
28210 return *outputs_[output_idx];
28211 }
28212 std::array<c10::ExclusivelyOwned<Tensor>, 3> outputs_;
28213};
28214::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU__linalg_det(const at::Tensor & A) {
28215structured__linalg_det_out_functional op;
28216op.meta(A);
28217op.impl(A, *op.outputs_[0], *op.outputs_[1], *op.outputs_[2]);
28218return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take(), std::move(op.outputs_[2]).take());
28219}
28220struct structured__linalg_det_out_out final : public at::native::structured__linalg_det_out {
28221 structured__linalg_det_out_out(Tensor& out0, Tensor& out1, Tensor& out2) : outputs_{ std::ref(out0), std::ref(out1), std::ref(out2) } {}
28222 void set_output_strided(
28223 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28224 TensorOptions options, DimnameList names
28225 ) override {
28226 const auto& out = outputs_[output_idx].get();
28227 resize_out(out, sizes, strides, options);
28228 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
28229 if (C10_UNLIKELY(maybe_proxy.has_value())) {
28230 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
28231 }
28232 if (!names.empty()) {
28233 namedinference::propagate_names(outputs_[output_idx], names);
28234 }
28235 // super must happen after, so that downstream can use maybe_get_output
28236 // to retrieve the output
28237 }
28238 void set_output_raw_strided(
28239 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28240 TensorOptions options, DimnameList names
28241 ) override {
28242 const auto& out = outputs_[output_idx].get();
28243 resize_out(out, sizes, strides, options);
28244 if (!names.empty()) {
28245 namedinference::propagate_names(outputs_[output_idx], names);
28246 }
28247 // super must happen after, so that downstream can use maybe_get_output
28248 // to retrieve the output
28249 }
28250 const Tensor& maybe_get_output(int64_t output_idx) override {
28251 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
28252 }
28253 std::array<std::reference_wrapper<Tensor>, 3> outputs_;
28254 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 3> proxy_outputs_;
28255};
28256::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CPU__linalg_det_out_result(const at::Tensor & A, at::Tensor & result, at::Tensor & LU, at::Tensor & pivots) {
28257structured__linalg_det_out_out op(result, LU, pivots);
28258op.meta(A);
28259op.impl(A, op.maybe_get_output(0), op.maybe_get_output(1), op.maybe_get_output(2));
28260if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
28261if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(**op.proxy_outputs_[1]);
28262if (op.proxy_outputs_[2].has_value()) op.outputs_[2].get().copy_(**op.proxy_outputs_[2]);
28263return std::forward_as_tuple(result, LU, pivots);
28264}
28265struct structured_linalg_ldl_factor_ex_out_functional final : public at::native::structured_linalg_ldl_factor_ex_out {
28266 void set_output_strided(
28267 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28268 TensorOptions options, DimnameList names
28269 ) override {
28270 outputs_[output_idx] = create_out(sizes, strides, options);
28271 if (!names.empty()) {
28272 namedinference::propagate_names(*outputs_[output_idx], names);
28273 }
28274 // super must happen after, so that downstream can use maybe_get_output
28275 // to retrieve the output
28276 }
28277 void set_output_raw_strided(
28278 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28279 TensorOptions options, DimnameList names
28280 ) override {
28281 outputs_[output_idx] = create_out(sizes, strides, options);
28282 if (!names.empty()) {
28283 namedinference::propagate_names(*outputs_[output_idx], names);
28284 }
28285 // super must happen after, so that downstream can use maybe_get_output
28286 // to retrieve the output
28287 }
28288 const Tensor& maybe_get_output(int64_t output_idx) override {
28289 return *outputs_[output_idx];
28290 }
28291 std::array<c10::ExclusivelyOwned<Tensor>, 3> outputs_;
28292};
28293::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU_linalg_ldl_factor_ex(const at::Tensor & self, bool hermitian, bool check_errors) {
28294structured_linalg_ldl_factor_ex_out_functional op;
28295op.meta(self, hermitian, check_errors);
28296op.impl(self, hermitian, check_errors, *op.outputs_[0], *op.outputs_[1], *op.outputs_[2]);
28297return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take(), std::move(op.outputs_[2]).take());
28298}
28299struct structured_linalg_ldl_factor_ex_out_out final : public at::native::structured_linalg_ldl_factor_ex_out {
28300 structured_linalg_ldl_factor_ex_out_out(Tensor& out0, Tensor& out1, Tensor& out2) : outputs_{ std::ref(out0), std::ref(out1), std::ref(out2) } {}
28301 void set_output_strided(
28302 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28303 TensorOptions options, DimnameList names
28304 ) override {
28305 const auto& out = outputs_[output_idx].get();
28306 resize_out(out, sizes, strides, options);
28307 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
28308 if (C10_UNLIKELY(maybe_proxy.has_value())) {
28309 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
28310 }
28311 if (!names.empty()) {
28312 namedinference::propagate_names(outputs_[output_idx], names);
28313 }
28314 // super must happen after, so that downstream can use maybe_get_output
28315 // to retrieve the output
28316 }
28317 void set_output_raw_strided(
28318 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28319 TensorOptions options, DimnameList names
28320 ) override {
28321 const auto& out = outputs_[output_idx].get();
28322 resize_out(out, sizes, strides, options);
28323 if (!names.empty()) {
28324 namedinference::propagate_names(outputs_[output_idx], names);
28325 }
28326 // super must happen after, so that downstream can use maybe_get_output
28327 // to retrieve the output
28328 }
28329 const Tensor& maybe_get_output(int64_t output_idx) override {
28330 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
28331 }
28332 std::array<std::reference_wrapper<Tensor>, 3> outputs_;
28333 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 3> proxy_outputs_;
28334};
28335::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CPU_linalg_ldl_factor_ex_out_out(const at::Tensor & self, bool hermitian, bool check_errors, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info) {
28336structured_linalg_ldl_factor_ex_out_out op(LD, pivots, info);
28337op.meta(self, hermitian, check_errors);
28338op.impl(self, hermitian, check_errors, op.maybe_get_output(0), op.maybe_get_output(1), op.maybe_get_output(2));
28339if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
28340if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(**op.proxy_outputs_[1]);
28341if (op.proxy_outputs_[2].has_value()) op.outputs_[2].get().copy_(**op.proxy_outputs_[2]);
28342return std::forward_as_tuple(LD, pivots, info);
28343}
28344struct structured_linalg_ldl_solve_out_functional final : public at::native::structured_linalg_ldl_solve_out {
28345 void set_output_strided(
28346 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28347 TensorOptions options, DimnameList names
28348 ) override {
28349 outputs_[output_idx] = create_out(sizes, strides, options);
28350 if (!names.empty()) {
28351 namedinference::propagate_names(*outputs_[output_idx], names);
28352 }
28353 // super must happen after, so that downstream can use maybe_get_output
28354 // to retrieve the output
28355 }
28356 void set_output_raw_strided(
28357 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28358 TensorOptions options, DimnameList names
28359 ) override {
28360 outputs_[output_idx] = create_out(sizes, strides, options);
28361 if (!names.empty()) {
28362 namedinference::propagate_names(*outputs_[output_idx], names);
28363 }
28364 // super must happen after, so that downstream can use maybe_get_output
28365 // to retrieve the output
28366 }
28367 const Tensor& maybe_get_output(int64_t output_idx) override {
28368 return *outputs_[output_idx];
28369 }
28370 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
28371};
28372at::Tensor wrapper_CPU_linalg_ldl_solve(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian) {
28373structured_linalg_ldl_solve_out_functional op;
28374op.meta(LD, pivots, B, hermitian);
28375op.impl(LD, pivots, B, hermitian, *op.outputs_[0]);
28376return std::move(op.outputs_[0]).take();
28377}
28378struct structured_linalg_ldl_solve_out_out final : public at::native::structured_linalg_ldl_solve_out {
28379 structured_linalg_ldl_solve_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
28380 void set_output_strided(
28381 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28382 TensorOptions options, DimnameList names
28383 ) override {
28384 const auto& out = outputs_[output_idx].get();
28385 resize_out(out, sizes, strides, options);
28386 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
28387 if (C10_UNLIKELY(maybe_proxy.has_value())) {
28388 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
28389 }
28390 if (!names.empty()) {
28391 namedinference::propagate_names(outputs_[output_idx], names);
28392 }
28393 // super must happen after, so that downstream can use maybe_get_output
28394 // to retrieve the output
28395 }
28396 void set_output_raw_strided(
28397 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28398 TensorOptions options, DimnameList names
28399 ) override {
28400 const auto& out = outputs_[output_idx].get();
28401 resize_out(out, sizes, strides, options);
28402 if (!names.empty()) {
28403 namedinference::propagate_names(outputs_[output_idx], names);
28404 }
28405 // super must happen after, so that downstream can use maybe_get_output
28406 // to retrieve the output
28407 }
28408 const Tensor& maybe_get_output(int64_t output_idx) override {
28409 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
28410 }
28411 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
28412 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
28413};
28414at::Tensor & wrapper_CPU_linalg_ldl_solve_out_out(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out) {
28415structured_linalg_ldl_solve_out_out op(out);
28416op.meta(LD, pivots, B, hermitian);
28417op.impl(LD, pivots, B, hermitian, op.maybe_get_output(0));
28418if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
28419return out;
28420}
28421namespace {
28422::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CPU_out_linalg_lstsq_out(const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond, c10::optional<c10::string_view> driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values) {
28423 // No device check
28424 // DeviceGuard omitted
28425 return at::native::linalg_lstsq_out(self, b, rcond, driver, solution, residuals, rank, singular_values);
28426}
28427} // anonymous namespace
28428namespace {
28429at::Tensor wrapper_CPU__linalg_matrix_exp(const at::Tensor & self) {
28430 // No device check
28431 // DeviceGuard omitted
28432 return at::native::linalg_matrix_exp(self);
28433}
28434} // anonymous namespace
28435struct structured__linalg_slogdet_out_functional final : public at::native::structured__linalg_slogdet_out {
28436 void set_output_strided(
28437 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28438 TensorOptions options, DimnameList names
28439 ) override {
28440 outputs_[output_idx] = create_out(sizes, strides, options);
28441 if (!names.empty()) {
28442 namedinference::propagate_names(*outputs_[output_idx], names);
28443 }
28444 // super must happen after, so that downstream can use maybe_get_output
28445 // to retrieve the output
28446 }
28447 void set_output_raw_strided(
28448 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28449 TensorOptions options, DimnameList names
28450 ) override {
28451 outputs_[output_idx] = create_out(sizes, strides, options);
28452 if (!names.empty()) {
28453 namedinference::propagate_names(*outputs_[output_idx], names);
28454 }
28455 // super must happen after, so that downstream can use maybe_get_output
28456 // to retrieve the output
28457 }
28458 const Tensor& maybe_get_output(int64_t output_idx) override {
28459 return *outputs_[output_idx];
28460 }
28461 std::array<c10::ExclusivelyOwned<Tensor>, 4> outputs_;
28462};
28463::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> wrapper_CPU__linalg_slogdet(const at::Tensor & A) {
28464structured__linalg_slogdet_out_functional op;
28465op.meta(A);
28466op.impl(A, *op.outputs_[0], *op.outputs_[1], *op.outputs_[2], *op.outputs_[3]);
28467return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take(), std::move(op.outputs_[2]).take(), std::move(op.outputs_[3]).take());
28468}
28469struct structured__linalg_slogdet_out_out final : public at::native::structured__linalg_slogdet_out {
28470 structured__linalg_slogdet_out_out(Tensor& out0, Tensor& out1, Tensor& out2, Tensor& out3) : outputs_{ std::ref(out0), std::ref(out1), std::ref(out2), std::ref(out3) } {}
28471 void set_output_strided(
28472 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28473 TensorOptions options, DimnameList names
28474 ) override {
28475 const auto& out = outputs_[output_idx].get();
28476 resize_out(out, sizes, strides, options);
28477 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
28478 if (C10_UNLIKELY(maybe_proxy.has_value())) {
28479 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
28480 }
28481 if (!names.empty()) {
28482 namedinference::propagate_names(outputs_[output_idx], names);
28483 }
28484 // super must happen after, so that downstream can use maybe_get_output
28485 // to retrieve the output
28486 }
28487 void set_output_raw_strided(
28488 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28489 TensorOptions options, DimnameList names
28490 ) override {
28491 const auto& out = outputs_[output_idx].get();
28492 resize_out(out, sizes, strides, options);
28493 if (!names.empty()) {
28494 namedinference::propagate_names(outputs_[output_idx], names);
28495 }
28496 // super must happen after, so that downstream can use maybe_get_output
28497 // to retrieve the output
28498 }
28499 const Tensor& maybe_get_output(int64_t output_idx) override {
28500 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
28501 }
28502 std::array<std::reference_wrapper<Tensor>, 4> outputs_;
28503 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 4> proxy_outputs_;
28504};
28505::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CPU__linalg_slogdet_out_sign(const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet, at::Tensor & LU, at::Tensor & pivots) {
28506structured__linalg_slogdet_out_out op(sign, logabsdet, LU, pivots);
28507op.meta(A);
28508op.impl(A, op.maybe_get_output(0), op.maybe_get_output(1), op.maybe_get_output(2), op.maybe_get_output(3));
28509if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
28510if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(**op.proxy_outputs_[1]);
28511if (op.proxy_outputs_[2].has_value()) op.outputs_[2].get().copy_(**op.proxy_outputs_[2]);
28512if (op.proxy_outputs_[3].has_value()) op.outputs_[3].get().copy_(**op.proxy_outputs_[3]);
28513return std::forward_as_tuple(sign, logabsdet, LU, pivots);
28514}
28515namespace {
28516::std::tuple<at::Tensor,at::Tensor> wrapper_CPU__linalg_eig(const at::Tensor & self) {
28517 // No device check
28518 // DeviceGuard omitted
28519 return at::native::linalg_eig(self);
28520}
28521} // anonymous namespace
28522namespace {
28523::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_out_linalg_eig_out(const at::Tensor & self, at::Tensor & eigenvalues, at::Tensor & eigenvectors) {
28524 // No device check
28525 // DeviceGuard omitted
28526 return at::native::linalg_eig_out(self, eigenvalues, eigenvectors);
28527}
28528} // anonymous namespace
28529struct structured__linalg_eigh_out_functional final : public at::native::structured__linalg_eigh_out {
28530 void set_output_strided(
28531 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28532 TensorOptions options, DimnameList names
28533 ) override {
28534 outputs_[output_idx] = create_out(sizes, strides, options);
28535 if (!names.empty()) {
28536 namedinference::propagate_names(*outputs_[output_idx], names);
28537 }
28538 // super must happen after, so that downstream can use maybe_get_output
28539 // to retrieve the output
28540 }
28541 void set_output_raw_strided(
28542 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28543 TensorOptions options, DimnameList names
28544 ) override {
28545 outputs_[output_idx] = create_out(sizes, strides, options);
28546 if (!names.empty()) {
28547 namedinference::propagate_names(*outputs_[output_idx], names);
28548 }
28549 // super must happen after, so that downstream can use maybe_get_output
28550 // to retrieve the output
28551 }
28552 const Tensor& maybe_get_output(int64_t output_idx) override {
28553 return *outputs_[output_idx];
28554 }
28555 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
28556};
28557::std::tuple<at::Tensor,at::Tensor> wrapper_CPU__linalg_eigh(const at::Tensor & A, c10::string_view UPLO, bool compute_v) {
28558structured__linalg_eigh_out_functional op;
28559op.meta(A, UPLO, compute_v);
28560op.impl(A, UPLO, compute_v, *op.outputs_[0], *op.outputs_[1]);
28561return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
28562}
28563struct structured__linalg_eigh_out_out final : public at::native::structured__linalg_eigh_out {
28564 structured__linalg_eigh_out_out(Tensor& out0, Tensor& out1) : outputs_{ std::ref(out0), std::ref(out1) } {}
28565 void set_output_strided(
28566 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28567 TensorOptions options, DimnameList names
28568 ) override {
28569 const auto& out = outputs_[output_idx].get();
28570 resize_out(out, sizes, strides, options);
28571 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
28572 if (C10_UNLIKELY(maybe_proxy.has_value())) {
28573 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
28574 }
28575 if (!names.empty()) {
28576 namedinference::propagate_names(outputs_[output_idx], names);
28577 }
28578 // super must happen after, so that downstream can use maybe_get_output
28579 // to retrieve the output
28580 }
28581 void set_output_raw_strided(
28582 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28583 TensorOptions options, DimnameList names
28584 ) override {
28585 const auto& out = outputs_[output_idx].get();
28586 resize_out(out, sizes, strides, options);
28587 if (!names.empty()) {
28588 namedinference::propagate_names(outputs_[output_idx], names);
28589 }
28590 // super must happen after, so that downstream can use maybe_get_output
28591 // to retrieve the output
28592 }
28593 const Tensor& maybe_get_output(int64_t output_idx) override {
28594 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
28595 }
28596 std::array<std::reference_wrapper<Tensor>, 2> outputs_;
28597 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 2> proxy_outputs_;
28598};
28599::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU__linalg_eigh_out_eigenvalues(const at::Tensor & A, c10::string_view UPLO, bool compute_v, at::Tensor & eigenvalues, at::Tensor & eigenvectors) {
28600structured__linalg_eigh_out_out op(eigenvalues, eigenvectors);
28601op.meta(A, UPLO, compute_v);
28602op.impl(A, UPLO, compute_v, op.maybe_get_output(0), op.maybe_get_output(1));
28603if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
28604if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(**op.proxy_outputs_[1]);
28605return std::forward_as_tuple(eigenvalues, eigenvectors);
28606}
28607namespace {
28608at::Tensor wrapper_CPU__linalg_householder_product(const at::Tensor & input, const at::Tensor & tau) {
28609 // No device check
28610 // DeviceGuard omitted
28611 return at::native::linalg_householder_product(input, tau);
28612}
28613} // anonymous namespace
28614namespace {
28615at::Tensor & wrapper_CPU_out_linalg_householder_product_out(const at::Tensor & input, const at::Tensor & tau, at::Tensor & out) {
28616 // No device check
28617 // DeviceGuard omitted
28618 return at::native::linalg_householder_product_out(input, tau, out);
28619}
28620} // anonymous namespace
28621struct structured_linalg_inv_ex_out_functional final : public at::native::structured_linalg_inv_ex_out {
28622 void set_output_strided(
28623 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28624 TensorOptions options, DimnameList names
28625 ) override {
28626 outputs_[output_idx] = create_out(sizes, strides, options);
28627 if (!names.empty()) {
28628 namedinference::propagate_names(*outputs_[output_idx], names);
28629 }
28630 // super must happen after, so that downstream can use maybe_get_output
28631 // to retrieve the output
28632 }
28633 void set_output_raw_strided(
28634 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28635 TensorOptions options, DimnameList names
28636 ) override {
28637 outputs_[output_idx] = create_out(sizes, strides, options);
28638 if (!names.empty()) {
28639 namedinference::propagate_names(*outputs_[output_idx], names);
28640 }
28641 // super must happen after, so that downstream can use maybe_get_output
28642 // to retrieve the output
28643 }
28644 const Tensor& maybe_get_output(int64_t output_idx) override {
28645 return *outputs_[output_idx];
28646 }
28647 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
28648};
28649::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_linalg_inv_ex(const at::Tensor & A, bool check_errors) {
28650structured_linalg_inv_ex_out_functional op;
28651op.meta(A, check_errors);
28652op.impl(A, check_errors, *op.outputs_[0], *op.outputs_[1]);
28653return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
28654}
28655struct structured_linalg_inv_ex_out_out final : public at::native::structured_linalg_inv_ex_out {
28656 structured_linalg_inv_ex_out_out(Tensor& out0, Tensor& out1) : outputs_{ std::ref(out0), std::ref(out1) } {}
28657 void set_output_strided(
28658 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28659 TensorOptions options, DimnameList names
28660 ) override {
28661 const auto& out = outputs_[output_idx].get();
28662 resize_out(out, sizes, strides, options);
28663 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
28664 if (C10_UNLIKELY(maybe_proxy.has_value())) {
28665 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
28666 }
28667 if (!names.empty()) {
28668 namedinference::propagate_names(outputs_[output_idx], names);
28669 }
28670 // super must happen after, so that downstream can use maybe_get_output
28671 // to retrieve the output
28672 }
28673 void set_output_raw_strided(
28674 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28675 TensorOptions options, DimnameList names
28676 ) override {
28677 const auto& out = outputs_[output_idx].get();
28678 resize_out(out, sizes, strides, options);
28679 if (!names.empty()) {
28680 namedinference::propagate_names(outputs_[output_idx], names);
28681 }
28682 // super must happen after, so that downstream can use maybe_get_output
28683 // to retrieve the output
28684 }
28685 const Tensor& maybe_get_output(int64_t output_idx) override {
28686 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
28687 }
28688 std::array<std::reference_wrapper<Tensor>, 2> outputs_;
28689 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 2> proxy_outputs_;
28690};
28691::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_linalg_inv_ex_out_inverse(const at::Tensor & A, bool check_errors, at::Tensor & inverse, at::Tensor & info) {
28692structured_linalg_inv_ex_out_out op(inverse, info);
28693op.meta(A, check_errors);
28694op.impl(A, check_errors, op.maybe_get_output(0), op.maybe_get_output(1));
28695if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
28696if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(**op.proxy_outputs_[1]);
28697return std::forward_as_tuple(inverse, info);
28698}
28699struct structured_linalg_vector_norm_out_functional final : public at::native::structured_linalg_vector_norm_out {
28700 void set_output_strided(
28701 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28702 TensorOptions options, DimnameList names
28703 ) override {
28704 outputs_[output_idx] = create_out(sizes, strides, options);
28705 if (!names.empty()) {
28706 namedinference::propagate_names(*outputs_[output_idx], names);
28707 }
28708 // super must happen after, so that downstream can use maybe_get_output
28709 // to retrieve the output
28710 }
28711 void set_output_raw_strided(
28712 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28713 TensorOptions options, DimnameList names
28714 ) override {
28715 outputs_[output_idx] = create_out(sizes, strides, options);
28716 if (!names.empty()) {
28717 namedinference::propagate_names(*outputs_[output_idx], names);
28718 }
28719 // super must happen after, so that downstream can use maybe_get_output
28720 // to retrieve the output
28721 }
28722 const Tensor& maybe_get_output(int64_t output_idx) override {
28723 return *outputs_[output_idx];
28724 }
28725 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
28726};
28727at::Tensor wrapper_CPU_linalg_vector_norm(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
28728structured_linalg_vector_norm_out_functional op;
28729op.meta(self, ord, dim, keepdim, dtype);
28730op.impl(self, ord, dim, keepdim, dtype, *op.outputs_[0]);
28731return std::move(op.outputs_[0]).take();
28732}
28733struct structured_linalg_vector_norm_out_out final : public at::native::structured_linalg_vector_norm_out {
28734 structured_linalg_vector_norm_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
28735 void set_output_strided(
28736 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28737 TensorOptions options, DimnameList names
28738 ) override {
28739 const auto& out = outputs_[output_idx].get();
28740 resize_out(out, sizes, strides, options);
28741 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
28742 if (C10_UNLIKELY(maybe_proxy.has_value())) {
28743 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
28744 }
28745 if (!names.empty()) {
28746 namedinference::propagate_names(outputs_[output_idx], names);
28747 }
28748 // super must happen after, so that downstream can use maybe_get_output
28749 // to retrieve the output
28750 }
28751 void set_output_raw_strided(
28752 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28753 TensorOptions options, DimnameList names
28754 ) override {
28755 const auto& out = outputs_[output_idx].get();
28756 resize_out(out, sizes, strides, options);
28757 if (!names.empty()) {
28758 namedinference::propagate_names(outputs_[output_idx], names);
28759 }
28760 // super must happen after, so that downstream can use maybe_get_output
28761 // to retrieve the output
28762 }
28763 const Tensor& maybe_get_output(int64_t output_idx) override {
28764 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
28765 }
28766 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
28767 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
28768};
28769at::Tensor & wrapper_CPU_linalg_vector_norm_out_out(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
28770structured_linalg_vector_norm_out_out op(out);
28771op.meta(self, ord, dim, keepdim, dtype);
28772op.impl(self, ord, dim, keepdim, dtype, op.maybe_get_output(0));
28773if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
28774return out;
28775}
28776struct structured__linalg_svd_out_functional final : public at::native::structured__linalg_svd_out {
28777 void set_output_strided(
28778 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28779 TensorOptions options, DimnameList names
28780 ) override {
28781 outputs_[output_idx] = create_out(sizes, strides, options);
28782 if (!names.empty()) {
28783 namedinference::propagate_names(*outputs_[output_idx], names);
28784 }
28785 // super must happen after, so that downstream can use maybe_get_output
28786 // to retrieve the output
28787 }
28788 void set_output_raw_strided(
28789 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28790 TensorOptions options, DimnameList names
28791 ) override {
28792 outputs_[output_idx] = create_out(sizes, strides, options);
28793 if (!names.empty()) {
28794 namedinference::propagate_names(*outputs_[output_idx], names);
28795 }
28796 // super must happen after, so that downstream can use maybe_get_output
28797 // to retrieve the output
28798 }
28799 const Tensor& maybe_get_output(int64_t output_idx) override {
28800 return *outputs_[output_idx];
28801 }
28802 std::array<c10::ExclusivelyOwned<Tensor>, 3> outputs_;
28803};
28804::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU__linalg_svd(const at::Tensor & A, bool full_matrices, bool compute_uv, c10::optional<c10::string_view> driver) {
28805structured__linalg_svd_out_functional op;
28806op.meta(A, full_matrices, compute_uv, driver);
28807op.impl(A, full_matrices, compute_uv, driver, *op.outputs_[0], *op.outputs_[1], *op.outputs_[2]);
28808return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take(), std::move(op.outputs_[2]).take());
28809}
28810struct structured__linalg_svd_out_out final : public at::native::structured__linalg_svd_out {
28811 structured__linalg_svd_out_out(Tensor& out0, Tensor& out1, Tensor& out2) : outputs_{ std::ref(out0), std::ref(out1), std::ref(out2) } {}
28812 void set_output_strided(
28813 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28814 TensorOptions options, DimnameList names
28815 ) override {
28816 const auto& out = outputs_[output_idx].get();
28817 resize_out(out, sizes, strides, options);
28818 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
28819 if (C10_UNLIKELY(maybe_proxy.has_value())) {
28820 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
28821 }
28822 if (!names.empty()) {
28823 namedinference::propagate_names(outputs_[output_idx], names);
28824 }
28825 // super must happen after, so that downstream can use maybe_get_output
28826 // to retrieve the output
28827 }
28828 void set_output_raw_strided(
28829 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28830 TensorOptions options, DimnameList names
28831 ) override {
28832 const auto& out = outputs_[output_idx].get();
28833 resize_out(out, sizes, strides, options);
28834 if (!names.empty()) {
28835 namedinference::propagate_names(outputs_[output_idx], names);
28836 }
28837 // super must happen after, so that downstream can use maybe_get_output
28838 // to retrieve the output
28839 }
28840 const Tensor& maybe_get_output(int64_t output_idx) override {
28841 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
28842 }
28843 std::array<std::reference_wrapper<Tensor>, 3> outputs_;
28844 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 3> proxy_outputs_;
28845};
28846::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CPU__linalg_svd_out_U(const at::Tensor & A, bool full_matrices, bool compute_uv, c10::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh) {
28847structured__linalg_svd_out_out op(U, S, Vh);
28848op.meta(A, full_matrices, compute_uv, driver);
28849op.impl(A, full_matrices, compute_uv, driver, op.maybe_get_output(0), op.maybe_get_output(1), op.maybe_get_output(2));
28850if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
28851if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(**op.proxy_outputs_[1]);
28852if (op.proxy_outputs_[2].has_value()) op.outputs_[2].get().copy_(**op.proxy_outputs_[2]);
28853return std::forward_as_tuple(U, S, Vh);
28854}
28855struct structured__linalg_solve_ex_out_functional final : public at::native::structured__linalg_solve_ex_out {
28856 void set_output_strided(
28857 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28858 TensorOptions options, DimnameList names
28859 ) override {
28860 outputs_[output_idx] = create_out(sizes, strides, options);
28861 if (!names.empty()) {
28862 namedinference::propagate_names(*outputs_[output_idx], names);
28863 }
28864 // super must happen after, so that downstream can use maybe_get_output
28865 // to retrieve the output
28866 }
28867 void set_output_raw_strided(
28868 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28869 TensorOptions options, DimnameList names
28870 ) override {
28871 outputs_[output_idx] = create_out(sizes, strides, options);
28872 if (!names.empty()) {
28873 namedinference::propagate_names(*outputs_[output_idx], names);
28874 }
28875 // super must happen after, so that downstream can use maybe_get_output
28876 // to retrieve the output
28877 }
28878 const Tensor& maybe_get_output(int64_t output_idx) override {
28879 return *outputs_[output_idx];
28880 }
28881 std::array<c10::ExclusivelyOwned<Tensor>, 4> outputs_;
28882};
28883::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> wrapper_CPU__linalg_solve_ex(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
28884structured__linalg_solve_ex_out_functional op;
28885op.meta(A, B, left, check_errors);
28886op.impl(A, B, left, check_errors, *op.outputs_[0], *op.outputs_[1], *op.outputs_[2], *op.outputs_[3]);
28887return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take(), std::move(op.outputs_[2]).take(), std::move(op.outputs_[3]).take());
28888}
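// The "_functional" class above is the allocating variant: set_output_strided()
// and set_output_raw_strided() create the result tensors themselves via
// create_out() and hold them as c10::ExclusivelyOwned<Tensor>, so the wrapper
// can release each one with .take() when assembling the returned tuple instead
// of copying into pre-existing outputs.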
28889struct structured__linalg_solve_ex_out_out final : public at::native::structured__linalg_solve_ex_out {
28890 structured__linalg_solve_ex_out_out(Tensor& out0, Tensor& out1, Tensor& out2, Tensor& out3) : outputs_{ std::ref(out0), std::ref(out1), std::ref(out2), std::ref(out3) } {}
28891 void set_output_strided(
28892 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28893 TensorOptions options, DimnameList names
28894 ) override {
28895 const auto& out = outputs_[output_idx].get();
28896 resize_out(out, sizes, strides, options);
28897 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
28898 if (C10_UNLIKELY(maybe_proxy.has_value())) {
28899 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
28900 }
28901 if (!names.empty()) {
28902 namedinference::propagate_names(outputs_[output_idx], names);
28903 }
28904 // super must happen after, so that downstream can use maybe_get_output
28905 // to retrieve the output
28906 }
28907 void set_output_raw_strided(
28908 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28909 TensorOptions options, DimnameList names
28910 ) override {
28911 const auto& out = outputs_[output_idx].get();
28912 resize_out(out, sizes, strides, options);
28913 if (!names.empty()) {
28914 namedinference::propagate_names(outputs_[output_idx], names);
28915 }
28916 // super must happen after, so that downstream can use maybe_get_output
28917 // to retrieve the output
28918 }
28919 const Tensor& maybe_get_output(int64_t output_idx) override {
28920 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
28921 }
28922 std::array<std::reference_wrapper<Tensor>, 4> outputs_;
28923 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 4> proxy_outputs_;
28924};
28925::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CPU__linalg_solve_ex_out_result(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info) {
28926structured__linalg_solve_ex_out_out op(result, LU, pivots, info);
28927op.meta(A, B, left, check_errors);
28928op.impl(A, B, left, check_errors, op.maybe_get_output(0), op.maybe_get_output(1), op.maybe_get_output(2), op.maybe_get_output(3));
28929if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
28930if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(**op.proxy_outputs_[1]);
28931if (op.proxy_outputs_[2].has_value()) op.outputs_[2].get().copy_(**op.proxy_outputs_[2]);
28932if (op.proxy_outputs_[3].has_value()) op.outputs_[3].get().copy_(**op.proxy_outputs_[3]);
28933return std::forward_as_tuple(result, LU, pivots, info);
28934}
28935struct structured_linalg_qr_out_functional final : public at::native::structured_linalg_qr_out {
28936 void set_output_strided(
28937 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28938 TensorOptions options, DimnameList names
28939 ) override {
28940 outputs_[output_idx] = create_out(sizes, strides, options);
28941 if (!names.empty()) {
28942 namedinference::propagate_names(*outputs_[output_idx], names);
28943 }
28944 // super must happen after, so that downstream can use maybe_get_output
28945 // to retrieve the output
28946 }
28947 void set_output_raw_strided(
28948 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28949 TensorOptions options, DimnameList names
28950 ) override {
28951 outputs_[output_idx] = create_out(sizes, strides, options);
28952 if (!names.empty()) {
28953 namedinference::propagate_names(*outputs_[output_idx], names);
28954 }
28955 // super must happen after, so that downstream can use maybe_get_output
28956 // to retrieve the output
28957 }
28958 const Tensor& maybe_get_output(int64_t output_idx) override {
28959 return *outputs_[output_idx];
28960 }
28961 std::array<c10::ExclusivelyOwned<Tensor>, 2> outputs_;
28962};
28963::std::tuple<at::Tensor,at::Tensor> wrapper_CPU_linalg_qr(const at::Tensor & A, c10::string_view mode) {
28964structured_linalg_qr_out_functional op;
28965op.meta(A, mode);
28966op.impl(A, mode, *op.outputs_[0], *op.outputs_[1]);
28967return std::make_tuple(std::move(op.outputs_[0]).take(), std::move(op.outputs_[1]).take());
28968}
28969struct structured_linalg_qr_out_out final : public at::native::structured_linalg_qr_out {
28970 structured_linalg_qr_out_out(Tensor& out0, Tensor& out1) : outputs_{ std::ref(out0), std::ref(out1) } {}
28971 void set_output_strided(
28972 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28973 TensorOptions options, DimnameList names
28974 ) override {
28975 const auto& out = outputs_[output_idx].get();
28976 resize_out(out, sizes, strides, options);
28977 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
28978 if (C10_UNLIKELY(maybe_proxy.has_value())) {
28979 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
28980 }
28981 if (!names.empty()) {
28982 namedinference::propagate_names(outputs_[output_idx], names);
28983 }
28984 // super must happen after, so that downstream can use maybe_get_output
28985 // to retrieve the output
28986 }
28987 void set_output_raw_strided(
28988 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
28989 TensorOptions options, DimnameList names
28990 ) override {
28991 const auto& out = outputs_[output_idx].get();
28992 resize_out(out, sizes, strides, options);
28993 if (!names.empty()) {
28994 namedinference::propagate_names(outputs_[output_idx], names);
28995 }
28996 // super must happen after, so that downstream can use maybe_get_output
28997 // to retrieve the output
28998 }
28999 const Tensor& maybe_get_output(int64_t output_idx) override {
29000 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
29001 }
29002 std::array<std::reference_wrapper<Tensor>, 2> outputs_;
29003 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 2> proxy_outputs_;
29004};
29005::std::tuple<at::Tensor &,at::Tensor &> wrapper_CPU_linalg_qr_out_out(const at::Tensor & A, c10::string_view mode, at::Tensor & Q, at::Tensor & R) {
29006structured_linalg_qr_out_out op(Q, R);
29007op.meta(A, mode);
29008op.impl(A, mode, op.maybe_get_output(0), op.maybe_get_output(1));
29009if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
29010if (op.proxy_outputs_[1].has_value()) op.outputs_[1].get().copy_(**op.proxy_outputs_[1]);
29011return std::forward_as_tuple(Q, R);
29012}
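// A rough caller-side sketch of how the out wrapper above is reached; the
// tensor names are hypothetical and shown only for illustration, assuming the
// usual generated at::linalg_qr_out overload that takes the out arguments
// first:
//   at::Tensor A = at::randn({3, 3});
//   at::Tensor Q = at::empty({0}, A.options());
//   at::Tensor R = at::empty({0}, A.options());
//   at::linalg_qr_out(Q, R, A, "reduced");
// For a CPU input the dispatcher would route such a call to
// wrapper_CPU_linalg_qr_out_out, which runs meta() and impl() as defined above.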
29013namespace {
29014at::Tensor wrapper_CPU___test_optional_intlist(const at::Tensor & values, at::OptionalIntArrayRef addends) {
29015 // No device check
29016 // DeviceGuard omitted
29017 return at::native::_test_optional_intlist(values, addends);
29018}
29019} // anonymous namespace
29020namespace {
29021at::Tensor wrapper_CPU___test_optional_filled_intlist(const at::Tensor & values, at::OptionalIntArrayRef addends) {
29022 // No device check
29023 // DeviceGuard omitted
29024 return at::native::_test_optional_intlist(values, addends);
29025}
29026} // anonymous namespace
29027namespace {
29028at::Tensor wrapper_CPU___test_optional_floatlist(const at::Tensor & values, c10::optional<at::ArrayRef<double>> addends) {
29029 // No device check
29030 // DeviceGuard omitted
29031 return at::native::_test_optional_floatlist(values, addends);
29032}
29033} // anonymous namespace
29034namespace {
29035at::Tensor wrapper_CPU__segment_reduce(const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & indices, const c10::optional<at::Tensor> & offsets, int64_t axis, bool unsafe, const c10::optional<at::Scalar> & initial) {
29036 // No device check
29037 // DeviceGuard omitted
29038 return at::native::segment_reduce_kernel(data, reduce, lengths, indices, offsets, axis, unsafe, initial);
29039}
29040} // anonymous namespace
29041namespace {
29042at::Tensor wrapper_CPU___segment_reduce_backward(const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & offsets, int64_t axis, const c10::optional<at::Scalar> & initial) {
29043 // No device check
29044 // DeviceGuard omitted
29045 return at::native::_segment_reduce_backward_kernel(grad, output, data, reduce, lengths, offsets, axis, initial);
29046}
29047} // anonymous namespace
29048namespace {
29049at::Tensor wrapper_CPU___transformer_encoder_layer_fwd(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, c10::optional<int64_t> mask_type) {
29050 // No device check
29051 // DeviceGuard omitted
29052 return at::native::transformer_encoder_layer_forward(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type);
29053}
29054} // anonymous namespace
29055namespace {
29056::std::tuple<at::Tensor,at::Tensor> wrapper_CPU___native_multi_head_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, bool need_weights, bool average_attn_weights, c10::optional<int64_t> mask_type) {
29057 // No device check
29058 // DeviceGuard omitted
29059 return at::native::native_multi_head_attention_cpu(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type);
29060}
29061} // anonymous namespace
29062namespace {
29063int64_t wrapper_CPU___fused_sdp_choice(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal) {
29064 // No device check
29065 // DeviceGuard omitted
29066 return at::native::_fused_sdp_choice_cpp(query, key, value, attn_mask, dropout_p, is_causal);
29067}
29068} // anonymous namespace
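// The wrappers in the anonymous namespaces above are the non-structured case:
// each one simply forwards to the corresponding at::native kernel. The
// "No device check" / "DeviceGuard omitted" comments record that the generator
// skipped the usual per-argument device validation and device-guard setup for
// these entries, so the native function is called directly with the arguments
// as received.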
29069struct structured_special_airy_ai_out_functional final : public at::native::structured_special_airy_ai_out {
29070 void set_output_strided(
29071 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29072 TensorOptions options, DimnameList names
29073 ) override {
29074 outputs_[output_idx] = create_out(sizes, strides, options);
29075 if (!names.empty()) {
29076 namedinference::propagate_names(*outputs_[output_idx], names);
29077 }
29078 // super must happen after, so that downstream can use maybe_get_output
29079 // to retrieve the output
29080 at::native::structured_special_airy_ai_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29081 }
29082 void set_output_raw_strided(
29083 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29084 TensorOptions options, DimnameList names
29085 ) override {
29086 outputs_[output_idx] = create_out(sizes, strides, options);
29087 if (!names.empty()) {
29088 namedinference::propagate_names(*outputs_[output_idx], names);
29089 }
29090 // super must happen after, so that downstream can use maybe_get_output
29091 // to retrieve the output
29092 at::native::structured_special_airy_ai_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29093 }
29094 const Tensor& maybe_get_output(int64_t output_idx) override {
29095 return *outputs_[output_idx];
29096 }
29097 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
29098};
29099at::Tensor wrapper_CPU_special_airy_ai(const at::Tensor & x) {
29100structured_special_airy_ai_out_functional op;
29101op.meta(x);
29102op.impl(x, *op.outputs_[0]);
29103return std::move(op.outputs_[0]).take();
29104}
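// Note the difference from the linalg wrappers earlier in this file: the
// special_* structured classes end both set_output_strided() and
// set_output_raw_strided() by forwarding to the parent
// at::native::structured_special_airy_ai_out::set_output_raw_strided(), which
// is the "super" call referred to by the comment above it.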
29105struct structured_special_airy_ai_out_out final : public at::native::structured_special_airy_ai_out {
29106 structured_special_airy_ai_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
29107 void set_output_strided(
29108 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29109 TensorOptions options, DimnameList names
29110 ) override {
29111 const auto& out = outputs_[output_idx].get();
29112 resize_out(out, sizes, strides, options);
29113 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
29114 if (C10_UNLIKELY(maybe_proxy.has_value())) {
29115 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
29116 }
29117 if (!names.empty()) {
29118 namedinference::propagate_names(outputs_[output_idx], names);
29119 }
29120 // super must happen after, so that downstream can use maybe_get_output
29121 // to retrieve the output
29122 at::native::structured_special_airy_ai_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29123 }
29124 void set_output_raw_strided(
29125 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29126 TensorOptions options, DimnameList names
29127 ) override {
29128 const auto& out = outputs_[output_idx].get();
29129 resize_out(out, sizes, strides, options);
29130 if (!names.empty()) {
29131 namedinference::propagate_names(outputs_[output_idx], names);
29132 }
29133 // super must happen after, so that downstream can use maybe_get_output
29134 // to retrieve the output
29135 at::native::structured_special_airy_ai_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29136 }
29137 const Tensor& maybe_get_output(int64_t output_idx) override {
29138 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
29139 }
29140 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
29141 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
29142};
29143at::Tensor & wrapper_CPU_special_airy_ai_out_out(const at::Tensor & x, at::Tensor & out) {
29144structured_special_airy_ai_out_out op(out);
29145op.meta(x);
29146op.impl(x, op.maybe_get_output(0));
29147if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
29148return out;
29149}
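// In the out flavour above, impl() may have written into a proxy tensor rather
// than the caller's `out` (proxy_outputs_[0] is populated by
// maybe_create_proxy(), presumably when the supplied tensor cannot be given the
// required layout in place); the final copy_() transfers that result back into
// `out` so the caller still observes the kernel's output.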
29150namespace {
29151::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CPU___transformer_decoder_only_layer_fwd(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, const c10::optional<at::Tensor> & incr_key, const c10::optional<at::Tensor> & incr_value) {
29152 // No device check
29153 // DeviceGuard omitted
29154 return at::native::transformer_decoder_only_layer_forward(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, incr_key, incr_value);
29155}
29156} // anonymous namespace
29157namespace {
29158::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> wrapper_CPU___native_decoder_only_multi_head_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, const c10::optional<at::Tensor> & incr_key, const c10::optional<at::Tensor> & incr_value, bool need_weights, bool average_attn_weights) {
29159 // No device check
29160 // DeviceGuard omitted
29161 return at::native::native_decoder_only_multi_head_attention(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, incr_key, incr_value, need_weights, average_attn_weights);
29162}
29163} // anonymous namespace
29164struct structured_special_bessel_j0_out_functional final : public at::native::structured_special_bessel_j0_out {
29165 void set_output_strided(
29166 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29167 TensorOptions options, DimnameList names
29168 ) override {
29169 outputs_[output_idx] = create_out(sizes, strides, options);
29170 if (!names.empty()) {
29171 namedinference::propagate_names(*outputs_[output_idx], names);
29172 }
29173 // super must happen after, so that downstream can use maybe_get_output
29174 // to retrieve the output
29175 at::native::structured_special_bessel_j0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29176 }
29177 void set_output_raw_strided(
29178 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29179 TensorOptions options, DimnameList names
29180 ) override {
29181 outputs_[output_idx] = create_out(sizes, strides, options);
29182 if (!names.empty()) {
29183 namedinference::propagate_names(*outputs_[output_idx], names);
29184 }
29185 // super must happen after, so that downstream can use maybe_get_output
29186 // to retrieve the output
29187 at::native::structured_special_bessel_j0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29188 }
29189 const Tensor& maybe_get_output(int64_t output_idx) override {
29190 return *outputs_[output_idx];
29191 }
29192 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
29193};
29194at::Tensor wrapper_CPU_special_bessel_j0(const at::Tensor & self) {
29195structured_special_bessel_j0_out_functional op;
29196op.meta(self);
29197op.impl(self, *op.outputs_[0]);
29198return std::move(op.outputs_[0]).take();
29199}
29200struct structured_special_bessel_j0_out_out final : public at::native::structured_special_bessel_j0_out {
29201 structured_special_bessel_j0_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
29202 void set_output_strided(
29203 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29204 TensorOptions options, DimnameList names
29205 ) override {
29206 const auto& out = outputs_[output_idx].get();
29207 resize_out(out, sizes, strides, options);
29208 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
29209 if (C10_UNLIKELY(maybe_proxy.has_value())) {
29210 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
29211 }
29212 if (!names.empty()) {
29213 namedinference::propagate_names(outputs_[output_idx], names);
29214 }
29215 // super must happen after, so that downstream can use maybe_get_output
29216 // to retrieve the output
29217 at::native::structured_special_bessel_j0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29218 }
29219 void set_output_raw_strided(
29220 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29221 TensorOptions options, DimnameList names
29222 ) override {
29223 const auto& out = outputs_[output_idx].get();
29224 resize_out(out, sizes, strides, options);
29225 if (!names.empty()) {
29226 namedinference::propagate_names(outputs_[output_idx], names);
29227 }
29228 // super must happen after, so that downstream can use maybe_get_output
29229 // to retrieve the output
29230 at::native::structured_special_bessel_j0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29231 }
29232 const Tensor& maybe_get_output(int64_t output_idx) override {
29233 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
29234 }
29235 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
29236 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
29237};
29238at::Tensor & wrapper_CPU_special_bessel_j0_out_out(const at::Tensor & self, at::Tensor & out) {
29239structured_special_bessel_j0_out_out op(out);
29240op.meta(self);
29241op.impl(self, op.maybe_get_output(0));
29242if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
29243return out;
29244}
29245struct structured_special_bessel_j1_out_functional final : public at::native::structured_special_bessel_j1_out {
29246 void set_output_strided(
29247 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29248 TensorOptions options, DimnameList names
29249 ) override {
29250 outputs_[output_idx] = create_out(sizes, strides, options);
29251 if (!names.empty()) {
29252 namedinference::propagate_names(*outputs_[output_idx], names);
29253 }
29254 // super must happen after, so that downstream can use maybe_get_output
29255 // to retrieve the output
29256 at::native::structured_special_bessel_j1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29257 }
29258 void set_output_raw_strided(
29259 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29260 TensorOptions options, DimnameList names
29261 ) override {
29262 outputs_[output_idx] = create_out(sizes, strides, options);
29263 if (!names.empty()) {
29264 namedinference::propagate_names(*outputs_[output_idx], names);
29265 }
29266 // super must happen after, so that downstream can use maybe_get_output
29267 // to retrieve the output
29268 at::native::structured_special_bessel_j1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29269 }
29270 const Tensor& maybe_get_output(int64_t output_idx) override {
29271 return *outputs_[output_idx];
29272 }
29273 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
29274};
29275at::Tensor wrapper_CPU_special_bessel_j1(const at::Tensor & self) {
29276structured_special_bessel_j1_out_functional op;
29277op.meta(self);
29278op.impl(self, *op.outputs_[0]);
29279return std::move(op.outputs_[0]).take();
29280}
29281struct structured_special_bessel_j1_out_out final : public at::native::structured_special_bessel_j1_out {
29282 structured_special_bessel_j1_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
29283 void set_output_strided(
29284 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29285 TensorOptions options, DimnameList names
29286 ) override {
29287 const auto& out = outputs_[output_idx].get();
29288 resize_out(out, sizes, strides, options);
29289 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
29290 if (C10_UNLIKELY(maybe_proxy.has_value())) {
29291 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
29292 }
29293 if (!names.empty()) {
29294 namedinference::propagate_names(outputs_[output_idx], names);
29295 }
29296 // super must happen after, so that downstream can use maybe_get_output
29297 // to retrieve the output
29298 at::native::structured_special_bessel_j1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29299 }
29300 void set_output_raw_strided(
29301 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29302 TensorOptions options, DimnameList names
29303 ) override {
29304 const auto& out = outputs_[output_idx].get();
29305 resize_out(out, sizes, strides, options);
29306 if (!names.empty()) {
29307 namedinference::propagate_names(outputs_[output_idx], names);
29308 }
29309 // super must happen after, so that downstream can use maybe_get_output
29310 // to retrieve the output
29311 at::native::structured_special_bessel_j1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29312 }
29313 const Tensor& maybe_get_output(int64_t output_idx) override {
29314 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
29315 }
29316 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
29317 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
29318};
29319at::Tensor & wrapper_CPU_special_bessel_j1_out_out(const at::Tensor & self, at::Tensor & out) {
29320structured_special_bessel_j1_out_out op(out);
29321op.meta(self);
29322op.impl(self, op.maybe_get_output(0));
29323if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
29324return out;
29325}
29326struct structured_special_bessel_y0_out_functional final : public at::native::structured_special_bessel_y0_out {
29327 void set_output_strided(
29328 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29329 TensorOptions options, DimnameList names
29330 ) override {
29331 outputs_[output_idx] = create_out(sizes, strides, options);
29332 if (!names.empty()) {
29333 namedinference::propagate_names(*outputs_[output_idx], names);
29334 }
29335 // super must happen after, so that downstream can use maybe_get_output
29336 // to retrieve the output
29337 at::native::structured_special_bessel_y0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29338 }
29339 void set_output_raw_strided(
29340 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29341 TensorOptions options, DimnameList names
29342 ) override {
29343 outputs_[output_idx] = create_out(sizes, strides, options);
29344 if (!names.empty()) {
29345 namedinference::propagate_names(*outputs_[output_idx], names);
29346 }
29347 // super must happen after, so that downstream can use maybe_get_output
29348 // to retrieve the output
29349 at::native::structured_special_bessel_y0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29350 }
29351 const Tensor& maybe_get_output(int64_t output_idx) override {
29352 return *outputs_[output_idx];
29353 }
29354 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
29355};
29356at::Tensor wrapper_CPU_special_bessel_y0(const at::Tensor & self) {
29357structured_special_bessel_y0_out_functional op;
29358op.meta(self);
29359op.impl(self, *op.outputs_[0]);
29360return std::move(op.outputs_[0]).take();
29361}
29362struct structured_special_bessel_y0_out_out final : public at::native::structured_special_bessel_y0_out {
29363 structured_special_bessel_y0_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
29364 void set_output_strided(
29365 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29366 TensorOptions options, DimnameList names
29367 ) override {
29368 const auto& out = outputs_[output_idx].get();
29369 resize_out(out, sizes, strides, options);
29370 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
29371 if (C10_UNLIKELY(maybe_proxy.has_value())) {
29372 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
29373 }
29374 if (!names.empty()) {
29375 namedinference::propagate_names(outputs_[output_idx], names);
29376 }
29377 // super must happen after, so that downstream can use maybe_get_output
29378 // to retrieve the output
29379 at::native::structured_special_bessel_y0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29380 }
29381 void set_output_raw_strided(
29382 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29383 TensorOptions options, DimnameList names
29384 ) override {
29385 const auto& out = outputs_[output_idx].get();
29386 resize_out(out, sizes, strides, options);
29387 if (!names.empty()) {
29388 namedinference::propagate_names(outputs_[output_idx], names);
29389 }
29390 // super must happen after, so that downstream can use maybe_get_output
29391 // to retrieve the output
29392 at::native::structured_special_bessel_y0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29393 }
29394 const Tensor& maybe_get_output(int64_t output_idx) override {
29395 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
29396 }
29397 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
29398 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
29399};
29400at::Tensor & wrapper_CPU_special_bessel_y0_out_out(const at::Tensor & self, at::Tensor & out) {
29401structured_special_bessel_y0_out_out op(out);
29402op.meta(self);
29403op.impl(self, op.maybe_get_output(0));
29404if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
29405return out;
29406}
29407struct structured_special_bessel_y1_out_functional final : public at::native::structured_special_bessel_y1_out {
29408 void set_output_strided(
29409 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29410 TensorOptions options, DimnameList names
29411 ) override {
29412 outputs_[output_idx] = create_out(sizes, strides, options);
29413 if (!names.empty()) {
29414 namedinference::propagate_names(*outputs_[output_idx], names);
29415 }
29416 // super must happen after, so that downstream can use maybe_get_output
29417 // to retrieve the output
29418 at::native::structured_special_bessel_y1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29419 }
29420 void set_output_raw_strided(
29421 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29422 TensorOptions options, DimnameList names
29423 ) override {
29424 outputs_[output_idx] = create_out(sizes, strides, options);
29425 if (!names.empty()) {
29426 namedinference::propagate_names(*outputs_[output_idx], names);
29427 }
29428 // super must happen after, so that downstream can use maybe_get_output
29429 // to retrieve the output
29430 at::native::structured_special_bessel_y1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29431 }
29432 const Tensor& maybe_get_output(int64_t output_idx) override {
29433 return *outputs_[output_idx];
29434 }
29435 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
29436};
29437at::Tensor wrapper_CPU_special_bessel_y1(const at::Tensor & self) {
29438structured_special_bessel_y1_out_functional op;
29439op.meta(self);
29440op.impl(self, *op.outputs_[0]);
29441return std::move(op.outputs_[0]).take();
29442}
29443struct structured_special_bessel_y1_out_out final : public at::native::structured_special_bessel_y1_out {
29444 structured_special_bessel_y1_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
29445 void set_output_strided(
29446 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29447 TensorOptions options, DimnameList names
29448 ) override {
29449 const auto& out = outputs_[output_idx].get();
29450 resize_out(out, sizes, strides, options);
29451 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
29452 if (C10_UNLIKELY(maybe_proxy.has_value())) {
29453 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
29454 }
29455 if (!names.empty()) {
29456 namedinference::propagate_names(outputs_[output_idx], names);
29457 }
29458 // super must happen after, so that downstream can use maybe_get_output
29459 // to retrieve the output
29460 at::native::structured_special_bessel_y1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29461 }
29462 void set_output_raw_strided(
29463 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29464 TensorOptions options, DimnameList names
29465 ) override {
29466 const auto& out = outputs_[output_idx].get();
29467 resize_out(out, sizes, strides, options);
29468 if (!names.empty()) {
29469 namedinference::propagate_names(outputs_[output_idx], names);
29470 }
29471 // super must happen after, so that downstream can use maybe_get_output
29472 // to retrieve the output
29473 at::native::structured_special_bessel_y1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29474 }
29475 const Tensor& maybe_get_output(int64_t output_idx) override {
29476 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
29477 }
29478 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
29479 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
29480};
29481at::Tensor & wrapper_CPU_special_bessel_y1_out_out(const at::Tensor & self, at::Tensor & out) {
29482structured_special_bessel_y1_out_out op(out);
29483op.meta(self);
29484op.impl(self, op.maybe_get_output(0));
29485if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
29486return out;
29487}
29488struct structured_special_chebyshev_polynomial_t_out_functional final : public at::native::structured_special_chebyshev_polynomial_t_out {
29489 void set_output_strided(
29490 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29491 TensorOptions options, DimnameList names
29492 ) override {
29493 outputs_[output_idx] = create_out(sizes, strides, options);
29494 if (!names.empty()) {
29495 namedinference::propagate_names(*outputs_[output_idx], names);
29496 }
29497 // super must happen after, so that downstream can use maybe_get_output
29498 // to retrieve the output
29499 at::native::structured_special_chebyshev_polynomial_t_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29500 }
29501 void set_output_raw_strided(
29502 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29503 TensorOptions options, DimnameList names
29504 ) override {
29505 outputs_[output_idx] = create_out(sizes, strides, options);
29506 if (!names.empty()) {
29507 namedinference::propagate_names(*outputs_[output_idx], names);
29508 }
29509 // super must happen after, so that downstream can use maybe_get_output
29510 // to retrieve the output
29511 at::native::structured_special_chebyshev_polynomial_t_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29512 }
29513 const Tensor& maybe_get_output(int64_t output_idx) override {
29514 return *outputs_[output_idx];
29515 }
29516 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
29517};
29518at::Tensor wrapper_CPU_special_chebyshev_polynomial_t(const at::Tensor & x, const at::Tensor & n) {
29519structured_special_chebyshev_polynomial_t_out_functional op;
29520op.meta(x, n);
29521op.impl(x, n, *op.outputs_[0]);
29522return std::move(op.outputs_[0]).take();
29523}
29524struct structured_special_chebyshev_polynomial_t_out_out final : public at::native::structured_special_chebyshev_polynomial_t_out {
29525 structured_special_chebyshev_polynomial_t_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
29526 void set_output_strided(
29527 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29528 TensorOptions options, DimnameList names
29529 ) override {
29530 const auto& out = outputs_[output_idx].get();
29531 resize_out(out, sizes, strides, options);
29532 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
29533 if (C10_UNLIKELY(maybe_proxy.has_value())) {
29534 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
29535 }
29536 if (!names.empty()) {
29537 namedinference::propagate_names(outputs_[output_idx], names);
29538 }
29539 // super must happen after, so that downstream can use maybe_get_output
29540 // to retrieve the output
29541 at::native::structured_special_chebyshev_polynomial_t_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29542 }
29543 void set_output_raw_strided(
29544 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29545 TensorOptions options, DimnameList names
29546 ) override {
29547 const auto& out = outputs_[output_idx].get();
29548 resize_out(out, sizes, strides, options);
29549 if (!names.empty()) {
29550 namedinference::propagate_names(outputs_[output_idx], names);
29551 }
29552 // super must happen after, so that downstream can use maybe_get_output
29553 // to retrieve the output
29554 at::native::structured_special_chebyshev_polynomial_t_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29555 }
29556 const Tensor& maybe_get_output(int64_t output_idx) override {
29557 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
29558 }
29559 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
29560 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
29561};
29562at::Tensor & wrapper_CPU_special_chebyshev_polynomial_t_out_out(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
29563structured_special_chebyshev_polynomial_t_out_out op(out);
29564op.meta(x, n);
29565op.impl(x, n, op.maybe_get_output(0));
29566if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
29567return out;
29568}
29569struct structured_special_chebyshev_polynomial_u_out_functional final : public at::native::structured_special_chebyshev_polynomial_u_out {
29570 void set_output_strided(
29571 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29572 TensorOptions options, DimnameList names
29573 ) override {
29574 outputs_[output_idx] = create_out(sizes, strides, options);
29575 if (!names.empty()) {
29576 namedinference::propagate_names(*outputs_[output_idx], names);
29577 }
29578 // super must happen after, so that downstream can use maybe_get_output
29579 // to retrieve the output
29580 at::native::structured_special_chebyshev_polynomial_u_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29581 }
29582 void set_output_raw_strided(
29583 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29584 TensorOptions options, DimnameList names
29585 ) override {
29586 outputs_[output_idx] = create_out(sizes, strides, options);
29587 if (!names.empty()) {
29588 namedinference::propagate_names(*outputs_[output_idx], names);
29589 }
29590 // super must happen after, so that downstream can use maybe_get_output
29591 // to retrieve the output
29592 at::native::structured_special_chebyshev_polynomial_u_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29593 }
29594 const Tensor& maybe_get_output(int64_t output_idx) override {
29595 return *outputs_[output_idx];
29596 }
29597 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
29598};
29599at::Tensor wrapper_CPU_special_chebyshev_polynomial_u(const at::Tensor & x, const at::Tensor & n) {
29600structured_special_chebyshev_polynomial_u_out_functional op;
29601op.meta(x, n);
29602op.impl(x, n, *op.outputs_[0]);
29603return std::move(op.outputs_[0]).take();
29604}
29605struct structured_special_chebyshev_polynomial_u_out_out final : public at::native::structured_special_chebyshev_polynomial_u_out {
29606 structured_special_chebyshev_polynomial_u_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
29607 void set_output_strided(
29608 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29609 TensorOptions options, DimnameList names
29610 ) override {
29611 const auto& out = outputs_[output_idx].get();
29612 resize_out(out, sizes, strides, options);
29613 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
29614 if (C10_UNLIKELY(maybe_proxy.has_value())) {
29615 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
29616 }
29617 if (!names.empty()) {
29618 namedinference::propagate_names(outputs_[output_idx], names);
29619 }
29620 // super must happen after, so that downstream can use maybe_get_output
29621 // to retrieve the output
29622 at::native::structured_special_chebyshev_polynomial_u_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29623 }
29624 void set_output_raw_strided(
29625 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29626 TensorOptions options, DimnameList names
29627 ) override {
29628 const auto& out = outputs_[output_idx].get();
29629 resize_out(out, sizes, strides, options);
29630 if (!names.empty()) {
29631 namedinference::propagate_names(outputs_[output_idx], names);
29632 }
29633 // super must happen after, so that downstream can use maybe_get_output
29634 // to retrieve the output
29635 at::native::structured_special_chebyshev_polynomial_u_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29636 }
29637 const Tensor& maybe_get_output(int64_t output_idx) override {
29638 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
29639 }
29640 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
29641 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
29642};
29643at::Tensor & wrapper_CPU_special_chebyshev_polynomial_u_out_out(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
29644structured_special_chebyshev_polynomial_u_out_out op(out);
29645op.meta(x, n);
29646op.impl(x, n, op.maybe_get_output(0));
29647if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
29648return out;
29649}
29650struct structured_special_chebyshev_polynomial_v_out_functional final : public at::native::structured_special_chebyshev_polynomial_v_out {
29651 void set_output_strided(
29652 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29653 TensorOptions options, DimnameList names
29654 ) override {
29655 outputs_[output_idx] = create_out(sizes, strides, options);
29656 if (!names.empty()) {
29657 namedinference::propagate_names(*outputs_[output_idx], names);
29658 }
29659 // super must happen after, so that downstream can use maybe_get_output
29660 // to retrieve the output
29661 at::native::structured_special_chebyshev_polynomial_v_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29662 }
29663 void set_output_raw_strided(
29664 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29665 TensorOptions options, DimnameList names
29666 ) override {
29667 outputs_[output_idx] = create_out(sizes, strides, options);
29668 if (!names.empty()) {
29669 namedinference::propagate_names(*outputs_[output_idx], names);
29670 }
29671 // super must happen after, so that downstream can use maybe_get_output
29672 // to retrieve the output
29673 at::native::structured_special_chebyshev_polynomial_v_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29674 }
29675 const Tensor& maybe_get_output(int64_t output_idx) override {
29676 return *outputs_[output_idx];
29677 }
29678 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
29679};
29680at::Tensor wrapper_CPU_special_chebyshev_polynomial_v(const at::Tensor & x, const at::Tensor & n) {
29681structured_special_chebyshev_polynomial_v_out_functional op;
29682op.meta(x, n);
29683op.impl(x, n, *op.outputs_[0]);
29684return std::move(op.outputs_[0]).take();
29685}
29686struct structured_special_chebyshev_polynomial_v_out_out final : public at::native::structured_special_chebyshev_polynomial_v_out {
29687 structured_special_chebyshev_polynomial_v_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
29688 void set_output_strided(
29689 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29690 TensorOptions options, DimnameList names
29691 ) override {
29692 const auto& out = outputs_[output_idx].get();
29693 resize_out(out, sizes, strides, options);
29694 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
29695 if (C10_UNLIKELY(maybe_proxy.has_value())) {
29696 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
29697 }
29698 if (!names.empty()) {
29699 namedinference::propagate_names(outputs_[output_idx], names);
29700 }
29701 // super must happen after, so that downstream can use maybe_get_output
29702 // to retrieve the output
29703 at::native::structured_special_chebyshev_polynomial_v_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29704 }
29705 void set_output_raw_strided(
29706 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29707 TensorOptions options, DimnameList names
29708 ) override {
29709 const auto& out = outputs_[output_idx].get();
29710 resize_out(out, sizes, strides, options);
29711 if (!names.empty()) {
29712 namedinference::propagate_names(outputs_[output_idx], names);
29713 }
29714 // super must happen after, so that downstream can use maybe_get_output
29715 // to retrieve the output
29716 at::native::structured_special_chebyshev_polynomial_v_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29717 }
29718 const Tensor& maybe_get_output(int64_t output_idx) override {
29719 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
29720 }
29721 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
29722 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
29723};
29724at::Tensor & wrapper_CPU_special_chebyshev_polynomial_v_out_out(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
29725structured_special_chebyshev_polynomial_v_out_out op(out);
29726op.meta(x, n);
29727op.impl(x, n, op.maybe_get_output(0));
29728if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
29729return out;
29730}
29731struct structured_special_chebyshev_polynomial_w_out_functional final : public at::native::structured_special_chebyshev_polynomial_w_out {
29732 void set_output_strided(
29733 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29734 TensorOptions options, DimnameList names
29735 ) override {
29736 outputs_[output_idx] = create_out(sizes, strides, options);
29737 if (!names.empty()) {
29738 namedinference::propagate_names(*outputs_[output_idx], names);
29739 }
29740 // super must happen after, so that downstream can use maybe_get_output
29741 // to retrieve the output
29742 at::native::structured_special_chebyshev_polynomial_w_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29743 }
29744 void set_output_raw_strided(
29745 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29746 TensorOptions options, DimnameList names
29747 ) override {
29748 outputs_[output_idx] = create_out(sizes, strides, options);
29749 if (!names.empty()) {
29750 namedinference::propagate_names(*outputs_[output_idx], names);
29751 }
29752 // super must happen after, so that downstream can use maybe_get_output
29753 // to retrieve the output
29754 at::native::structured_special_chebyshev_polynomial_w_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29755 }
29756 const Tensor& maybe_get_output(int64_t output_idx) override {
29757 return *outputs_[output_idx];
29758 }
29759 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
29760};
29761at::Tensor wrapper_CPU_special_chebyshev_polynomial_w(const at::Tensor & x, const at::Tensor & n) {
29762structured_special_chebyshev_polynomial_w_out_functional op;
29763op.meta(x, n);
29764op.impl(x, n, *op.outputs_[0]);
29765return std::move(op.outputs_[0]).take();
29766}
29767struct structured_special_chebyshev_polynomial_w_out_out final : public at::native::structured_special_chebyshev_polynomial_w_out {
29768 structured_special_chebyshev_polynomial_w_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
29769 void set_output_strided(
29770 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29771 TensorOptions options, DimnameList names
29772 ) override {
29773 const auto& out = outputs_[output_idx].get();
29774 resize_out(out, sizes, strides, options);
29775 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
29776 if (C10_UNLIKELY(maybe_proxy.has_value())) {
29777 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
29778 }
29779 if (!names.empty()) {
29780 namedinference::propagate_names(outputs_[output_idx], names);
29781 }
29782 // super must happen after, so that downstream can use maybe_get_output
29783 // to retrieve the output
29784 at::native::structured_special_chebyshev_polynomial_w_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29785 }
29786 void set_output_raw_strided(
29787 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29788 TensorOptions options, DimnameList names
29789 ) override {
29790 const auto& out = outputs_[output_idx].get();
29791 resize_out(out, sizes, strides, options);
29792 if (!names.empty()) {
29793 namedinference::propagate_names(outputs_[output_idx], names);
29794 }
29795 // super must happen after, so that downstream can use maybe_get_output
29796 // to retrieve the output
29797 at::native::structured_special_chebyshev_polynomial_w_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29798 }
29799 const Tensor& maybe_get_output(int64_t output_idx) override {
29800 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
29801 }
29802 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
29803 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
29804};
29805at::Tensor & wrapper_CPU_special_chebyshev_polynomial_w_out_out(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
29806structured_special_chebyshev_polynomial_w_out_out op(out);
29807op.meta(x, n);
29808op.impl(x, n, op.maybe_get_output(0));
29809if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
29810return out;
29811}
29812struct structured_special_hermite_polynomial_h_out_functional final : public at::native::structured_special_hermite_polynomial_h_out {
29813 void set_output_strided(
29814 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29815 TensorOptions options, DimnameList names
29816 ) override {
29817 outputs_[output_idx] = create_out(sizes, strides, options);
29818 if (!names.empty()) {
29819 namedinference::propagate_names(*outputs_[output_idx], names);
29820 }
29821 // super must happen after, so that downstream can use maybe_get_output
29822 // to retrieve the output
29823 at::native::structured_special_hermite_polynomial_h_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29824 }
29825 void set_output_raw_strided(
29826 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29827 TensorOptions options, DimnameList names
29828 ) override {
29829 outputs_[output_idx] = create_out(sizes, strides, options);
29830 if (!names.empty()) {
29831 namedinference::propagate_names(*outputs_[output_idx], names);
29832 }
29833 // super must happen after, so that downstream can use maybe_get_output
29834 // to retrieve the output
29835 at::native::structured_special_hermite_polynomial_h_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29836 }
29837 const Tensor& maybe_get_output(int64_t output_idx) override {
29838 return *outputs_[output_idx];
29839 }
29840 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
29841};
29842at::Tensor wrapper_CPU_special_hermite_polynomial_h(const at::Tensor & x, const at::Tensor & n) {
29843structured_special_hermite_polynomial_h_out_functional op;
29844op.meta(x, n);
29845op.impl(x, n, *op.outputs_[0]);
29846return std::move(op.outputs_[0]).take();
29847}
29848struct structured_special_hermite_polynomial_h_out_out final : public at::native::structured_special_hermite_polynomial_h_out {
29849 structured_special_hermite_polynomial_h_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
29850 void set_output_strided(
29851 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29852 TensorOptions options, DimnameList names
29853 ) override {
29854 const auto& out = outputs_[output_idx].get();
29855 resize_out(out, sizes, strides, options);
29856 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
29857 if (C10_UNLIKELY(maybe_proxy.has_value())) {
29858 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
29859 }
29860 if (!names.empty()) {
29861 namedinference::propagate_names(outputs_[output_idx], names);
29862 }
29863 // super must happen after, so that downstream can use maybe_get_output
29864 // to retrieve the output
29865 at::native::structured_special_hermite_polynomial_h_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29866 }
29867 void set_output_raw_strided(
29868 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29869 TensorOptions options, DimnameList names
29870 ) override {
29871 const auto& out = outputs_[output_idx].get();
29872 resize_out(out, sizes, strides, options);
29873 if (!names.empty()) {
29874 namedinference::propagate_names(outputs_[output_idx], names);
29875 }
29876 // super must happen after, so that downstream can use maybe_get_output
29877 // to retrieve the output
29878 at::native::structured_special_hermite_polynomial_h_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29879 }
29880 const Tensor& maybe_get_output(int64_t output_idx) override {
29881 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
29882 }
29883 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
29884 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
29885};
29886at::Tensor & wrapper_CPU_special_hermite_polynomial_h_out_out(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
29887structured_special_hermite_polynomial_h_out_out op(out);
29888op.meta(x, n);
29889op.impl(x, n, op.maybe_get_output(0));
29890if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
29891return out;
29892}
29893struct structured_special_hermite_polynomial_he_out_functional final : public at::native::structured_special_hermite_polynomial_he_out {
29894 void set_output_strided(
29895 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29896 TensorOptions options, DimnameList names
29897 ) override {
29898 outputs_[output_idx] = create_out(sizes, strides, options);
29899 if (!names.empty()) {
29900 namedinference::propagate_names(*outputs_[output_idx], names);
29901 }
29902 // super must happen after, so that downstream can use maybe_get_output
29903 // to retrieve the output
29904 at::native::structured_special_hermite_polynomial_he_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29905 }
29906 void set_output_raw_strided(
29907 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29908 TensorOptions options, DimnameList names
29909 ) override {
29910 outputs_[output_idx] = create_out(sizes, strides, options);
29911 if (!names.empty()) {
29912 namedinference::propagate_names(*outputs_[output_idx], names);
29913 }
29914 // super must happen after, so that downstream can use maybe_get_output
29915 // to retrieve the output
29916 at::native::structured_special_hermite_polynomial_he_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29917 }
29918 const Tensor& maybe_get_output(int64_t output_idx) override {
29919 return *outputs_[output_idx];
29920 }
29921 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
29922};
29923at::Tensor wrapper_CPU_special_hermite_polynomial_he(const at::Tensor & x, const at::Tensor & n) {
29924structured_special_hermite_polynomial_he_out_functional op;
29925op.meta(x, n);
29926op.impl(x, n, *op.outputs_[0]);
29927return std::move(op.outputs_[0]).take();
29928}
29929struct structured_special_hermite_polynomial_he_out_out final : public at::native::structured_special_hermite_polynomial_he_out {
29930 structured_special_hermite_polynomial_he_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
29931 void set_output_strided(
29932 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29933 TensorOptions options, DimnameList names
29934 ) override {
29935 const auto& out = outputs_[output_idx].get();
29936 resize_out(out, sizes, strides, options);
29937 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
29938 if (C10_UNLIKELY(maybe_proxy.has_value())) {
29939 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
29940 }
29941 if (!names.empty()) {
29942 namedinference::propagate_names(outputs_[output_idx], names);
29943 }
29944 // super must happen after, so that downstream can use maybe_get_output
29945 // to retrieve the output
29946 at::native::structured_special_hermite_polynomial_he_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29947 }
29948 void set_output_raw_strided(
29949 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29950 TensorOptions options, DimnameList names
29951 ) override {
29952 const auto& out = outputs_[output_idx].get();
29953 resize_out(out, sizes, strides, options);
29954 if (!names.empty()) {
29955 namedinference::propagate_names(outputs_[output_idx], names);
29956 }
29957 // super must happen after, so that downstream can use maybe_get_output
29958 // to retrieve the output
29959 at::native::structured_special_hermite_polynomial_he_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29960 }
29961 const Tensor& maybe_get_output(int64_t output_idx) override {
29962 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
29963 }
29964 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
29965 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
29966};
29967at::Tensor & wrapper_CPU_special_hermite_polynomial_he_out_out(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
29968structured_special_hermite_polynomial_he_out_out op(out);
29969op.meta(x, n);
29970op.impl(x, n, op.maybe_get_output(0));
29971if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
29972return out;
29973}
29974struct structured_special_laguerre_polynomial_l_out_functional final : public at::native::structured_special_laguerre_polynomial_l_out {
29975 void set_output_strided(
29976 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29977 TensorOptions options, DimnameList names
29978 ) override {
29979 outputs_[output_idx] = create_out(sizes, strides, options);
29980 if (!names.empty()) {
29981 namedinference::propagate_names(*outputs_[output_idx], names);
29982 }
29983 // super must happen after, so that downstream can use maybe_get_output
29984 // to retrieve the output
29985 at::native::structured_special_laguerre_polynomial_l_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29986 }
29987 void set_output_raw_strided(
29988 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
29989 TensorOptions options, DimnameList names
29990 ) override {
29991 outputs_[output_idx] = create_out(sizes, strides, options);
29992 if (!names.empty()) {
29993 namedinference::propagate_names(*outputs_[output_idx], names);
29994 }
29995 // super must happen after, so that downstream can use maybe_get_output
29996 // to retrieve the output
29997 at::native::structured_special_laguerre_polynomial_l_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
29998 }
29999 const Tensor& maybe_get_output(int64_t output_idx) override {
30000 return *outputs_[output_idx];
30001 }
30002 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
30003};
30004at::Tensor wrapper_CPU_special_laguerre_polynomial_l(const at::Tensor & x, const at::Tensor & n) {
30005structured_special_laguerre_polynomial_l_out_functional op;
30006op.meta(x, n);
30007op.impl(x, n, *op.outputs_[0]);
30008return std::move(op.outputs_[0]).take();
30009}
30010struct structured_special_laguerre_polynomial_l_out_out final : public at::native::structured_special_laguerre_polynomial_l_out {
30011 structured_special_laguerre_polynomial_l_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
30012 void set_output_strided(
30013 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30014 TensorOptions options, DimnameList names
30015 ) override {
30016 const auto& out = outputs_[output_idx].get();
30017 resize_out(out, sizes, strides, options);
30018 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
30019 if (C10_UNLIKELY(maybe_proxy.has_value())) {
30020 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
30021 }
30022 if (!names.empty()) {
30023 namedinference::propagate_names(outputs_[output_idx], names);
30024 }
30025 // super must happen after, so that downstream can use maybe_get_output
30026 // to retrieve the output
30027 at::native::structured_special_laguerre_polynomial_l_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30028 }
30029 void set_output_raw_strided(
30030 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30031 TensorOptions options, DimnameList names
30032 ) override {
30033 const auto& out = outputs_[output_idx].get();
30034 resize_out(out, sizes, strides, options);
30035 if (!names.empty()) {
30036 namedinference::propagate_names(outputs_[output_idx], names);
30037 }
30038 // super must happen after, so that downstream can use maybe_get_output
30039 // to retrieve the output
30040 at::native::structured_special_laguerre_polynomial_l_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30041 }
30042 const Tensor& maybe_get_output(int64_t output_idx) override {
30043 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
30044 }
30045 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
30046 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
30047};
30048at::Tensor & wrapper_CPU_special_laguerre_polynomial_l_out_out(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
30049structured_special_laguerre_polynomial_l_out_out op(out);
30050op.meta(x, n);
30051op.impl(x, n, op.maybe_get_output(0));
30052if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
30053return out;
30054}
30055struct structured_special_legendre_polynomial_p_out_functional final : public at::native::structured_special_legendre_polynomial_p_out {
30056 void set_output_strided(
30057 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30058 TensorOptions options, DimnameList names
30059 ) override {
30060 outputs_[output_idx] = create_out(sizes, strides, options);
30061 if (!names.empty()) {
30062 namedinference::propagate_names(*outputs_[output_idx], names);
30063 }
30064 // super must happen after, so that downstream can use maybe_get_output
30065 // to retrieve the output
30066 at::native::structured_special_legendre_polynomial_p_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30067 }
30068 void set_output_raw_strided(
30069 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30070 TensorOptions options, DimnameList names
30071 ) override {
30072 outputs_[output_idx] = create_out(sizes, strides, options);
30073 if (!names.empty()) {
30074 namedinference::propagate_names(*outputs_[output_idx], names);
30075 }
30076 // super must happen after, so that downstream can use maybe_get_output
30077 // to retrieve the output
30078 at::native::structured_special_legendre_polynomial_p_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30079 }
30080 const Tensor& maybe_get_output(int64_t output_idx) override {
30081 return *outputs_[output_idx];
30082 }
30083 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
30084};
30085at::Tensor wrapper_CPU_special_legendre_polynomial_p(const at::Tensor & x, const at::Tensor & n) {
30086structured_special_legendre_polynomial_p_out_functional op;
30087op.meta(x, n);
30088op.impl(x, n, *op.outputs_[0]);
30089return std::move(op.outputs_[0]).take();
30090}
30091struct structured_special_legendre_polynomial_p_out_out final : public at::native::structured_special_legendre_polynomial_p_out {
30092 structured_special_legendre_polynomial_p_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
30093 void set_output_strided(
30094 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30095 TensorOptions options, DimnameList names
30096 ) override {
30097 const auto& out = outputs_[output_idx].get();
30098 resize_out(out, sizes, strides, options);
30099 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
30100 if (C10_UNLIKELY(maybe_proxy.has_value())) {
30101 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
30102 }
30103 if (!names.empty()) {
30104 namedinference::propagate_names(outputs_[output_idx], names);
30105 }
30106 // super must happen after, so that downstream can use maybe_get_output
30107 // to retrieve the output
30108 at::native::structured_special_legendre_polynomial_p_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30109 }
30110 void set_output_raw_strided(
30111 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30112 TensorOptions options, DimnameList names
30113 ) override {
30114 const auto& out = outputs_[output_idx].get();
30115 resize_out(out, sizes, strides, options);
30116 if (!names.empty()) {
30117 namedinference::propagate_names(outputs_[output_idx], names);
30118 }
30119 // super must happen after, so that downstream can use maybe_get_output
30120 // to retrieve the output
30121 at::native::structured_special_legendre_polynomial_p_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30122 }
30123 const Tensor& maybe_get_output(int64_t output_idx) override {
30124 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
30125 }
30126 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
30127 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
30128};
30129at::Tensor & wrapper_CPU_special_legendre_polynomial_p_out_out(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
30130structured_special_legendre_polynomial_p_out_out op(out);
30131op.meta(x, n);
30132op.impl(x, n, op.maybe_get_output(0));
30133if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
30134return out;
30135}
30136struct structured_special_modified_bessel_i0_out_functional final : public at::native::structured_special_modified_bessel_i0_out {
30137 void set_output_strided(
30138 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30139 TensorOptions options, DimnameList names
30140 ) override {
30141 outputs_[output_idx] = create_out(sizes, strides, options);
30142 if (!names.empty()) {
30143 namedinference::propagate_names(*outputs_[output_idx], names);
30144 }
30145 // super must happen after, so that downstream can use maybe_get_output
30146 // to retrieve the output
30147 at::native::structured_special_modified_bessel_i0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30148 }
30149 void set_output_raw_strided(
30150 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30151 TensorOptions options, DimnameList names
30152 ) override {
30153 outputs_[output_idx] = create_out(sizes, strides, options);
30154 if (!names.empty()) {
30155 namedinference::propagate_names(*outputs_[output_idx], names);
30156 }
30157 // super must happen after, so that downstream can use maybe_get_output
30158 // to retrieve the output
30159 at::native::structured_special_modified_bessel_i0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30160 }
30161 const Tensor& maybe_get_output(int64_t output_idx) override {
30162 return *outputs_[output_idx];
30163 }
30164 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
30165};
30166at::Tensor wrapper_CPU_special_modified_bessel_i0(const at::Tensor & self) {
30167structured_special_modified_bessel_i0_out_functional op;
30168op.meta(self);
30169op.impl(self, *op.outputs_[0]);
30170return std::move(op.outputs_[0]).take();
30171}
30172struct structured_special_modified_bessel_i0_out_out final : public at::native::structured_special_modified_bessel_i0_out {
30173 structured_special_modified_bessel_i0_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
30174 void set_output_strided(
30175 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30176 TensorOptions options, DimnameList names
30177 ) override {
30178 const auto& out = outputs_[output_idx].get();
30179 resize_out(out, sizes, strides, options);
30180 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
30181 if (C10_UNLIKELY(maybe_proxy.has_value())) {
30182 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
30183 }
30184 if (!names.empty()) {
30185 namedinference::propagate_names(outputs_[output_idx], names);
30186 }
30187 // super must happen after, so that downstream can use maybe_get_output
30188 // to retrieve the output
30189 at::native::structured_special_modified_bessel_i0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30190 }
30191 void set_output_raw_strided(
30192 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30193 TensorOptions options, DimnameList names
30194 ) override {
30195 const auto& out = outputs_[output_idx].get();
30196 resize_out(out, sizes, strides, options);
30197 if (!names.empty()) {
30198 namedinference::propagate_names(outputs_[output_idx], names);
30199 }
30200 // super must happen after, so that downstream can use maybe_get_output
30201 // to retrieve the output
30202 at::native::structured_special_modified_bessel_i0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30203 }
30204 const Tensor& maybe_get_output(int64_t output_idx) override {
30205 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
30206 }
30207 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
30208 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
30209};
30210at::Tensor & wrapper_CPU_special_modified_bessel_i0_out_out(const at::Tensor & self, at::Tensor & out) {
30211structured_special_modified_bessel_i0_out_out op(out);
30212op.meta(self);
30213op.impl(self, op.maybe_get_output(0));
30214if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
30215return out;
30216}
30217struct structured_special_modified_bessel_i1_out_functional final : public at::native::structured_special_modified_bessel_i1_out {
30218 void set_output_strided(
30219 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30220 TensorOptions options, DimnameList names
30221 ) override {
30222 outputs_[output_idx] = create_out(sizes, strides, options);
30223 if (!names.empty()) {
30224 namedinference::propagate_names(*outputs_[output_idx], names);
30225 }
30226 // super must happen after, so that downstream can use maybe_get_output
30227 // to retrieve the output
30228 at::native::structured_special_modified_bessel_i1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30229 }
30230 void set_output_raw_strided(
30231 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30232 TensorOptions options, DimnameList names
30233 ) override {
30234 outputs_[output_idx] = create_out(sizes, strides, options);
30235 if (!names.empty()) {
30236 namedinference::propagate_names(*outputs_[output_idx], names);
30237 }
30238 // super must happen after, so that downstream can use maybe_get_output
30239 // to retrieve the output
30240 at::native::structured_special_modified_bessel_i1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30241 }
30242 const Tensor& maybe_get_output(int64_t output_idx) override {
30243 return *outputs_[output_idx];
30244 }
30245 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
30246};
30247at::Tensor wrapper_CPU_special_modified_bessel_i1(const at::Tensor & self) {
30248structured_special_modified_bessel_i1_out_functional op;
30249op.meta(self);
30250op.impl(self, *op.outputs_[0]);
30251return std::move(op.outputs_[0]).take();
30252}
30253struct structured_special_modified_bessel_i1_out_out final : public at::native::structured_special_modified_bessel_i1_out {
30254 structured_special_modified_bessel_i1_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
30255 void set_output_strided(
30256 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30257 TensorOptions options, DimnameList names
30258 ) override {
30259 const auto& out = outputs_[output_idx].get();
30260 resize_out(out, sizes, strides, options);
30261 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
30262 if (C10_UNLIKELY(maybe_proxy.has_value())) {
30263 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
30264 }
30265 if (!names.empty()) {
30266 namedinference::propagate_names(outputs_[output_idx], names);
30267 }
30268 // super must happen after, so that downstream can use maybe_get_output
30269 // to retrieve the output
30270 at::native::structured_special_modified_bessel_i1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30271 }
30272 void set_output_raw_strided(
30273 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30274 TensorOptions options, DimnameList names
30275 ) override {
30276 const auto& out = outputs_[output_idx].get();
30277 resize_out(out, sizes, strides, options);
30278 if (!names.empty()) {
30279 namedinference::propagate_names(outputs_[output_idx], names);
30280 }
30281 // super must happen after, so that downstream can use maybe_get_output
30282 // to retrieve the output
30283 at::native::structured_special_modified_bessel_i1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30284 }
30285 const Tensor& maybe_get_output(int64_t output_idx) override {
30286 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
30287 }
30288 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
30289 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
30290};
30291at::Tensor & wrapper_CPU_special_modified_bessel_i1_out_out(const at::Tensor & self, at::Tensor & out) {
30292structured_special_modified_bessel_i1_out_out op(out);
30293op.meta(self);
30294op.impl(self, op.maybe_get_output(0));
30295if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
30296return out;
30297}
30298struct structured_special_modified_bessel_k0_out_functional final : public at::native::structured_special_modified_bessel_k0_out {
30299 void set_output_strided(
30300 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30301 TensorOptions options, DimnameList names
30302 ) override {
30303 outputs_[output_idx] = create_out(sizes, strides, options);
30304 if (!names.empty()) {
30305 namedinference::propagate_names(*outputs_[output_idx], names);
30306 }
30307 // super must happen after, so that downstream can use maybe_get_output
30308 // to retrieve the output
30309 at::native::structured_special_modified_bessel_k0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30310 }
30311 void set_output_raw_strided(
30312 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30313 TensorOptions options, DimnameList names
30314 ) override {
30315 outputs_[output_idx] = create_out(sizes, strides, options);
30316 if (!names.empty()) {
30317 namedinference::propagate_names(*outputs_[output_idx], names);
30318 }
30319 // super must happen after, so that downstream can use maybe_get_output
30320 // to retrieve the output
30321 at::native::structured_special_modified_bessel_k0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30322 }
30323 const Tensor& maybe_get_output(int64_t output_idx) override {
30324 return *outputs_[output_idx];
30325 }
30326 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
30327};
30328at::Tensor wrapper_CPU_special_modified_bessel_k0(const at::Tensor & self) {
30329structured_special_modified_bessel_k0_out_functional op;
30330op.meta(self);
30331op.impl(self, *op.outputs_[0]);
30332return std::move(op.outputs_[0]).take();
30333}
30334struct structured_special_modified_bessel_k0_out_out final : public at::native::structured_special_modified_bessel_k0_out {
30335 structured_special_modified_bessel_k0_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
30336 void set_output_strided(
30337 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30338 TensorOptions options, DimnameList names
30339 ) override {
30340 const auto& out = outputs_[output_idx].get();
30341 resize_out(out, sizes, strides, options);
30342 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
30343 if (C10_UNLIKELY(maybe_proxy.has_value())) {
30344 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
30345 }
30346 if (!names.empty()) {
30347 namedinference::propagate_names(outputs_[output_idx], names);
30348 }
30349 // super must happen after, so that downstream can use maybe_get_output
30350 // to retrieve the output
30351 at::native::structured_special_modified_bessel_k0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30352 }
30353 void set_output_raw_strided(
30354 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30355 TensorOptions options, DimnameList names
30356 ) override {
30357 const auto& out = outputs_[output_idx].get();
30358 resize_out(out, sizes, strides, options);
30359 if (!names.empty()) {
30360 namedinference::propagate_names(outputs_[output_idx], names);
30361 }
30362 // super must happen after, so that downstream can use maybe_get_output
30363 // to retrieve the output
30364 at::native::structured_special_modified_bessel_k0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30365 }
30366 const Tensor& maybe_get_output(int64_t output_idx) override {
30367 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
30368 }
30369 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
30370 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
30371};
30372at::Tensor & wrapper_CPU_special_modified_bessel_k0_out_out(const at::Tensor & self, at::Tensor & out) {
30373structured_special_modified_bessel_k0_out_out op(out);
30374op.meta(self);
30375op.impl(self, op.maybe_get_output(0));
30376if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
30377return out;
30378}
30379struct structured_special_modified_bessel_k1_out_functional final : public at::native::structured_special_modified_bessel_k1_out {
30380 void set_output_strided(
30381 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30382 TensorOptions options, DimnameList names
30383 ) override {
30384 outputs_[output_idx] = create_out(sizes, strides, options);
30385 if (!names.empty()) {
30386 namedinference::propagate_names(*outputs_[output_idx], names);
30387 }
30388 // super must happen after, so that downstream can use maybe_get_output
30389 // to retrieve the output
30390 at::native::structured_special_modified_bessel_k1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30391 }
30392 void set_output_raw_strided(
30393 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30394 TensorOptions options, DimnameList names
30395 ) override {
30396 outputs_[output_idx] = create_out(sizes, strides, options);
30397 if (!names.empty()) {
30398 namedinference::propagate_names(*outputs_[output_idx], names);
30399 }
30400 // super must happen after, so that downstream can use maybe_get_output
30401 // to retrieve the output
30402 at::native::structured_special_modified_bessel_k1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30403 }
30404 const Tensor& maybe_get_output(int64_t output_idx) override {
30405 return *outputs_[output_idx];
30406 }
30407 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
30408};
30409at::Tensor wrapper_CPU_special_modified_bessel_k1(const at::Tensor & self) {
30410structured_special_modified_bessel_k1_out_functional op;
30411op.meta(self);
30412op.impl(self, *op.outputs_[0]);
30413return std::move(op.outputs_[0]).take();
30414}
30415struct structured_special_modified_bessel_k1_out_out final : public at::native::structured_special_modified_bessel_k1_out {
30416 structured_special_modified_bessel_k1_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
30417 void set_output_strided(
30418 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30419 TensorOptions options, DimnameList names
30420 ) override {
30421 const auto& out = outputs_[output_idx].get();
30422 resize_out(out, sizes, strides, options);
30423 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
30424 if (C10_UNLIKELY(maybe_proxy.has_value())) {
30425 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
30426 }
30427 if (!names.empty()) {
30428 namedinference::propagate_names(outputs_[output_idx], names);
30429 }
30430 // super must happen after, so that downstream can use maybe_get_output
30431 // to retrieve the output
30432 at::native::structured_special_modified_bessel_k1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30433 }
30434 void set_output_raw_strided(
30435 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30436 TensorOptions options, DimnameList names
30437 ) override {
30438 const auto& out = outputs_[output_idx].get();
30439 resize_out(out, sizes, strides, options);
30440 if (!names.empty()) {
30441 namedinference::propagate_names(outputs_[output_idx], names);
30442 }
30443 // super must happen after, so that downstream can use maybe_get_output
30444 // to retrieve the output
30445 at::native::structured_special_modified_bessel_k1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30446 }
30447 const Tensor& maybe_get_output(int64_t output_idx) override {
30448 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
30449 }
30450 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
30451 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
30452};
30453at::Tensor & wrapper_CPU_special_modified_bessel_k1_out_out(const at::Tensor & self, at::Tensor & out) {
30454structured_special_modified_bessel_k1_out_out op(out);
30455op.meta(self);
30456op.impl(self, op.maybe_get_output(0));
30457if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
30458return out;
30459}
30460struct structured_special_scaled_modified_bessel_k0_out_functional final : public at::native::structured_special_scaled_modified_bessel_k0_out {
30461 void set_output_strided(
30462 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30463 TensorOptions options, DimnameList names
30464 ) override {
30465 outputs_[output_idx] = create_out(sizes, strides, options);
30466 if (!names.empty()) {
30467 namedinference::propagate_names(*outputs_[output_idx], names);
30468 }
30469 // super must happen after, so that downstream can use maybe_get_output
30470 // to retrieve the output
30471 at::native::structured_special_scaled_modified_bessel_k0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30472 }
30473 void set_output_raw_strided(
30474 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30475 TensorOptions options, DimnameList names
30476 ) override {
30477 outputs_[output_idx] = create_out(sizes, strides, options);
30478 if (!names.empty()) {
30479 namedinference::propagate_names(*outputs_[output_idx], names);
30480 }
30481 // super must happen after, so that downstream can use maybe_get_output
30482 // to retrieve the output
30483 at::native::structured_special_scaled_modified_bessel_k0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30484 }
30485 const Tensor& maybe_get_output(int64_t output_idx) override {
30486 return *outputs_[output_idx];
30487 }
30488 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
30489};
30490at::Tensor wrapper_CPU_special_scaled_modified_bessel_k0(const at::Tensor & x) {
30491structured_special_scaled_modified_bessel_k0_out_functional op;
30492op.meta(x);
30493op.impl(x, *op.outputs_[0]);
30494return std::move(op.outputs_[0]).take();
30495}
30496struct structured_special_scaled_modified_bessel_k0_out_out final : public at::native::structured_special_scaled_modified_bessel_k0_out {
30497 structured_special_scaled_modified_bessel_k0_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
30498 void set_output_strided(
30499 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30500 TensorOptions options, DimnameList names
30501 ) override {
30502 const auto& out = outputs_[output_idx].get();
30503 resize_out(out, sizes, strides, options);
30504 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
30505 if (C10_UNLIKELY(maybe_proxy.has_value())) {
30506 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
30507 }
30508 if (!names.empty()) {
30509 namedinference::propagate_names(outputs_[output_idx], names);
30510 }
30511 // super must happen after, so that downstream can use maybe_get_output
30512 // to retrieve the output
30513 at::native::structured_special_scaled_modified_bessel_k0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30514 }
30515 void set_output_raw_strided(
30516 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30517 TensorOptions options, DimnameList names
30518 ) override {
30519 const auto& out = outputs_[output_idx].get();
30520 resize_out(out, sizes, strides, options);
30521 if (!names.empty()) {
30522 namedinference::propagate_names(outputs_[output_idx], names);
30523 }
30524 // super must happen after, so that downstream can use maybe_get_output
30525 // to retrieve the output
30526 at::native::structured_special_scaled_modified_bessel_k0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30527 }
30528 const Tensor& maybe_get_output(int64_t output_idx) override {
30529 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
30530 }
30531 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
30532 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
30533};
30534at::Tensor & wrapper_CPU_special_scaled_modified_bessel_k0_out_out(const at::Tensor & x, at::Tensor & out) {
30535structured_special_scaled_modified_bessel_k0_out_out op(out);
30536op.meta(x);
30537op.impl(x, op.maybe_get_output(0));
30538if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
30539return out;
30540}
30541struct structured_special_scaled_modified_bessel_k1_out_functional final : public at::native::structured_special_scaled_modified_bessel_k1_out {
30542 void set_output_strided(
30543 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30544 TensorOptions options, DimnameList names
30545 ) override {
30546 outputs_[output_idx] = create_out(sizes, strides, options);
30547 if (!names.empty()) {
30548 namedinference::propagate_names(*outputs_[output_idx], names);
30549 }
30550 // super must happen after, so that downstream can use maybe_get_output
30551 // to retrieve the output
30552 at::native::structured_special_scaled_modified_bessel_k1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30553 }
30554 void set_output_raw_strided(
30555 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30556 TensorOptions options, DimnameList names
30557 ) override {
30558 outputs_[output_idx] = create_out(sizes, strides, options);
30559 if (!names.empty()) {
30560 namedinference::propagate_names(*outputs_[output_idx], names);
30561 }
30562 // super must happen after, so that downstream can use maybe_get_output
30563 // to retrieve the output
30564 at::native::structured_special_scaled_modified_bessel_k1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30565 }
30566 const Tensor& maybe_get_output(int64_t output_idx) override {
30567 return *outputs_[output_idx];
30568 }
30569 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
30570};
30571at::Tensor wrapper_CPU_special_scaled_modified_bessel_k1(const at::Tensor & x) {
30572structured_special_scaled_modified_bessel_k1_out_functional op;
30573op.meta(x);
30574op.impl(x, *op.outputs_[0]);
30575return std::move(op.outputs_[0]).take();
30576}
30577struct structured_special_scaled_modified_bessel_k1_out_out final : public at::native::structured_special_scaled_modified_bessel_k1_out {
30578 structured_special_scaled_modified_bessel_k1_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
30579 void set_output_strided(
30580 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30581 TensorOptions options, DimnameList names
30582 ) override {
30583 const auto& out = outputs_[output_idx].get();
30584 resize_out(out, sizes, strides, options);
30585 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
30586 if (C10_UNLIKELY(maybe_proxy.has_value())) {
30587 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
30588 }
30589 if (!names.empty()) {
30590 namedinference::propagate_names(outputs_[output_idx], names);
30591 }
30592 // super must happen after, so that downstream can use maybe_get_output
30593 // to retrieve the output
30594 at::native::structured_special_scaled_modified_bessel_k1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30595 }
30596 void set_output_raw_strided(
30597 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30598 TensorOptions options, DimnameList names
30599 ) override {
30600 const auto& out = outputs_[output_idx].get();
30601 resize_out(out, sizes, strides, options);
30602 if (!names.empty()) {
30603 namedinference::propagate_names(outputs_[output_idx], names);
30604 }
30605 // super must happen after, so that downstream can use maybe_get_output
30606 // to retrieve the output
30607 at::native::structured_special_scaled_modified_bessel_k1_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30608 }
30609 const Tensor& maybe_get_output(int64_t output_idx) override {
30610 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
30611 }
30612 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
30613 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
30614};
30615at::Tensor & wrapper_CPU_special_scaled_modified_bessel_k1_out_out(const at::Tensor & x, at::Tensor & out) {
30616structured_special_scaled_modified_bessel_k1_out_out op(out);
30617op.meta(x);
30618op.impl(x, op.maybe_get_output(0));
30619if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
30620return out;
30621}
30622struct structured_special_shifted_chebyshev_polynomial_t_out_functional final : public at::native::structured_special_shifted_chebyshev_polynomial_t_out {
30623 void set_output_strided(
30624 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30625 TensorOptions options, DimnameList names
30626 ) override {
30627 outputs_[output_idx] = create_out(sizes, strides, options);
30628 if (!names.empty()) {
30629 namedinference::propagate_names(*outputs_[output_idx], names);
30630 }
30631 // super must happen after, so that downstream can use maybe_get_output
30632 // to retrieve the output
30633 at::native::structured_special_shifted_chebyshev_polynomial_t_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30634 }
30635 void set_output_raw_strided(
30636 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30637 TensorOptions options, DimnameList names
30638 ) override {
30639 outputs_[output_idx] = create_out(sizes, strides, options);
30640 if (!names.empty()) {
30641 namedinference::propagate_names(*outputs_[output_idx], names);
30642 }
30643 // super must happen after, so that downstream can use maybe_get_output
30644 // to retrieve the output
30645 at::native::structured_special_shifted_chebyshev_polynomial_t_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30646 }
30647 const Tensor& maybe_get_output(int64_t output_idx) override {
30648 return *outputs_[output_idx];
30649 }
30650 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
30651};
30652at::Tensor wrapper_CPU_special_shifted_chebyshev_polynomial_t(const at::Tensor & x, const at::Tensor & n) {
30653structured_special_shifted_chebyshev_polynomial_t_out_functional op;
30654op.meta(x, n);
30655op.impl(x, n, *op.outputs_[0]);
30656return std::move(op.outputs_[0]).take();
30657}
30658struct structured_special_shifted_chebyshev_polynomial_t_out_out final : public at::native::structured_special_shifted_chebyshev_polynomial_t_out {
30659 structured_special_shifted_chebyshev_polynomial_t_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
30660 void set_output_strided(
30661 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30662 TensorOptions options, DimnameList names
30663 ) override {
30664 const auto& out = outputs_[output_idx].get();
30665 resize_out(out, sizes, strides, options);
30666 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
30667 if (C10_UNLIKELY(maybe_proxy.has_value())) {
30668 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
30669 }
30670 if (!names.empty()) {
30671 namedinference::propagate_names(outputs_[output_idx], names);
30672 }
30673 // super must happen after, so that downstream can use maybe_get_output
30674 // to retrieve the output
30675 at::native::structured_special_shifted_chebyshev_polynomial_t_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30676 }
30677 void set_output_raw_strided(
30678 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30679 TensorOptions options, DimnameList names
30680 ) override {
30681 const auto& out = outputs_[output_idx].get();
30682 resize_out(out, sizes, strides, options);
30683 if (!names.empty()) {
30684 namedinference::propagate_names(outputs_[output_idx], names);
30685 }
30686 // super must happen after, so that downstream can use maybe_get_output
30687 // to retrieve the output
30688 at::native::structured_special_shifted_chebyshev_polynomial_t_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30689 }
30690 const Tensor& maybe_get_output(int64_t output_idx) override {
30691 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
30692 }
30693 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
30694 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
30695};
30696at::Tensor & wrapper_CPU_special_shifted_chebyshev_polynomial_t_out_out(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
30697structured_special_shifted_chebyshev_polynomial_t_out_out op(out);
30698op.meta(x, n);
30699op.impl(x, n, op.maybe_get_output(0));
30700if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
30701return out;
30702}
30703struct structured_special_shifted_chebyshev_polynomial_u_out_functional final : public at::native::structured_special_shifted_chebyshev_polynomial_u_out {
30704 void set_output_strided(
30705 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30706 TensorOptions options, DimnameList names
30707 ) override {
30708 outputs_[output_idx] = create_out(sizes, strides, options);
30709 if (!names.empty()) {
30710 namedinference::propagate_names(*outputs_[output_idx], names);
30711 }
30712 // super must happen after, so that downstream can use maybe_get_output
30713 // to retrieve the output
30714 at::native::structured_special_shifted_chebyshev_polynomial_u_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30715 }
30716 void set_output_raw_strided(
30717 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30718 TensorOptions options, DimnameList names
30719 ) override {
30720 outputs_[output_idx] = create_out(sizes, strides, options);
30721 if (!names.empty()) {
30722 namedinference::propagate_names(*outputs_[output_idx], names);
30723 }
30724 // super must happen after, so that downstream can use maybe_get_output
30725 // to retrieve the output
30726 at::native::structured_special_shifted_chebyshev_polynomial_u_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30727 }
30728 const Tensor& maybe_get_output(int64_t output_idx) override {
30729 return *outputs_[output_idx];
30730 }
30731 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
30732};
30733at::Tensor wrapper_CPU_special_shifted_chebyshev_polynomial_u(const at::Tensor & x, const at::Tensor & n) {
30734structured_special_shifted_chebyshev_polynomial_u_out_functional op;
30735op.meta(x, n);
30736op.impl(x, n, *op.outputs_[0]);
30737return std::move(op.outputs_[0]).take();
30738}
30739struct structured_special_shifted_chebyshev_polynomial_u_out_out final : public at::native::structured_special_shifted_chebyshev_polynomial_u_out {
30740 structured_special_shifted_chebyshev_polynomial_u_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
30741 void set_output_strided(
30742 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30743 TensorOptions options, DimnameList names
30744 ) override {
30745 const auto& out = outputs_[output_idx].get();
30746 resize_out(out, sizes, strides, options);
30747 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
30748 if (C10_UNLIKELY(maybe_proxy.has_value())) {
30749 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
30750 }
30751 if (!names.empty()) {
30752 namedinference::propagate_names(outputs_[output_idx], names);
30753 }
30754 // super must happen after, so that downstream can use maybe_get_output
30755 // to retrieve the output
30756 at::native::structured_special_shifted_chebyshev_polynomial_u_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30757 }
30758 void set_output_raw_strided(
30759 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30760 TensorOptions options, DimnameList names
30761 ) override {
30762 const auto& out = outputs_[output_idx].get();
30763 resize_out(out, sizes, strides, options);
30764 if (!names.empty()) {
30765 namedinference::propagate_names(outputs_[output_idx], names);
30766 }
30767 // super must happen after, so that downstream can use maybe_get_output
30768 // to retrieve the output
30769 at::native::structured_special_shifted_chebyshev_polynomial_u_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30770 }
30771 const Tensor& maybe_get_output(int64_t output_idx) override {
30772 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
30773 }
30774 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
30775 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
30776};
30777at::Tensor & wrapper_CPU_special_shifted_chebyshev_polynomial_u_out_out(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
30778structured_special_shifted_chebyshev_polynomial_u_out_out op(out);
30779op.meta(x, n);
30780op.impl(x, n, op.maybe_get_output(0));
30781if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
30782return out;
30783}
30784struct structured_special_shifted_chebyshev_polynomial_v_out_functional final : public at::native::structured_special_shifted_chebyshev_polynomial_v_out {
30785 void set_output_strided(
30786 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30787 TensorOptions options, DimnameList names
30788 ) override {
30789 outputs_[output_idx] = create_out(sizes, strides, options);
30790 if (!names.empty()) {
30791 namedinference::propagate_names(*outputs_[output_idx], names);
30792 }
30793 // super must happen after, so that downstream can use maybe_get_output
30794 // to retrieve the output
30795 at::native::structured_special_shifted_chebyshev_polynomial_v_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30796 }
30797 void set_output_raw_strided(
30798 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30799 TensorOptions options, DimnameList names
30800 ) override {
30801 outputs_[output_idx] = create_out(sizes, strides, options);
30802 if (!names.empty()) {
30803 namedinference::propagate_names(*outputs_[output_idx], names);
30804 }
30805 // super must happen after, so that downstream can use maybe_get_output
30806 // to retrieve the output
30807 at::native::structured_special_shifted_chebyshev_polynomial_v_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30808 }
30809 const Tensor& maybe_get_output(int64_t output_idx) override {
30810 return *outputs_[output_idx];
30811 }
30812 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
30813};
30814at::Tensor wrapper_CPU_special_shifted_chebyshev_polynomial_v(const at::Tensor & x, const at::Tensor & n) {
30815structured_special_shifted_chebyshev_polynomial_v_out_functional op;
30816op.meta(x, n);
30817op.impl(x, n, *op.outputs_[0]);
30818return std::move(op.outputs_[0]).take();
30819}
30820struct structured_special_shifted_chebyshev_polynomial_v_out_out final : public at::native::structured_special_shifted_chebyshev_polynomial_v_out {
30821 structured_special_shifted_chebyshev_polynomial_v_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
30822 void set_output_strided(
30823 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30824 TensorOptions options, DimnameList names
30825 ) override {
30826 const auto& out = outputs_[output_idx].get();
30827 resize_out(out, sizes, strides, options);
30828 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
30829 if (C10_UNLIKELY(maybe_proxy.has_value())) {
30830 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
30831 }
30832 if (!names.empty()) {
30833 namedinference::propagate_names(outputs_[output_idx], names);
30834 }
30835 // super must happen after, so that downstream can use maybe_get_output
30836 // to retrieve the output
30837 at::native::structured_special_shifted_chebyshev_polynomial_v_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30838 }
30839 void set_output_raw_strided(
30840 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30841 TensorOptions options, DimnameList names
30842 ) override {
30843 const auto& out = outputs_[output_idx].get();
30844 resize_out(out, sizes, strides, options);
30845 if (!names.empty()) {
30846 namedinference::propagate_names(outputs_[output_idx], names);
30847 }
30848 // super must happen after, so that downstream can use maybe_get_output
30849 // to retrieve the output
30850 at::native::structured_special_shifted_chebyshev_polynomial_v_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30851 }
30852 const Tensor& maybe_get_output(int64_t output_idx) override {
30853 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
30854 }
30855 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
30856 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
30857};
30858at::Tensor & wrapper_CPU_special_shifted_chebyshev_polynomial_v_out_out(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
30859structured_special_shifted_chebyshev_polynomial_v_out_out op(out);
30860op.meta(x, n);
30861op.impl(x, n, op.maybe_get_output(0));
30862if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
30863return out;
30864}
30865struct structured_special_shifted_chebyshev_polynomial_w_out_functional final : public at::native::structured_special_shifted_chebyshev_polynomial_w_out {
30866 void set_output_strided(
30867 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30868 TensorOptions options, DimnameList names
30869 ) override {
30870 outputs_[output_idx] = create_out(sizes, strides, options);
30871 if (!names.empty()) {
30872 namedinference::propagate_names(*outputs_[output_idx], names);
30873 }
30874 // super must happen after, so that downstream can use maybe_get_output
30875 // to retrieve the output
30876 at::native::structured_special_shifted_chebyshev_polynomial_w_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30877 }
30878 void set_output_raw_strided(
30879 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30880 TensorOptions options, DimnameList names
30881 ) override {
30882 outputs_[output_idx] = create_out(sizes, strides, options);
30883 if (!names.empty()) {
30884 namedinference::propagate_names(*outputs_[output_idx], names);
30885 }
30886 // super must happen after, so that downstream can use maybe_get_output
30887 // to retrieve the output
30888 at::native::structured_special_shifted_chebyshev_polynomial_w_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30889 }
30890 const Tensor& maybe_get_output(int64_t output_idx) override {
30891 return *outputs_[output_idx];
30892 }
30893 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
30894};
30895at::Tensor wrapper_CPU_special_shifted_chebyshev_polynomial_w(const at::Tensor & x, const at::Tensor & n) {
30896structured_special_shifted_chebyshev_polynomial_w_out_functional op;
30897op.meta(x, n);
30898op.impl(x, n, *op.outputs_[0]);
30899return std::move(op.outputs_[0]).take();
30900}
30901struct structured_special_shifted_chebyshev_polynomial_w_out_out final : public at::native::structured_special_shifted_chebyshev_polynomial_w_out {
30902 structured_special_shifted_chebyshev_polynomial_w_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
30903 void set_output_strided(
30904 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30905 TensorOptions options, DimnameList names
30906 ) override {
30907 const auto& out = outputs_[output_idx].get();
30908 resize_out(out, sizes, strides, options);
30909 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
30910 if (C10_UNLIKELY(maybe_proxy.has_value())) {
30911 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
30912 }
30913 if (!names.empty()) {
30914 namedinference::propagate_names(outputs_[output_idx], names);
30915 }
30916 // super must happen after, so that downstream can use maybe_get_output
30917 // to retrieve the output
30918 at::native::structured_special_shifted_chebyshev_polynomial_w_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30919 }
30920 void set_output_raw_strided(
30921 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30922 TensorOptions options, DimnameList names
30923 ) override {
30924 const auto& out = outputs_[output_idx].get();
30925 resize_out(out, sizes, strides, options);
30926 if (!names.empty()) {
30927 namedinference::propagate_names(outputs_[output_idx], names);
30928 }
30929 // super must happen after, so that downstream can use maybe_get_output
30930 // to retrieve the output
30931 at::native::structured_special_shifted_chebyshev_polynomial_w_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30932 }
30933 const Tensor& maybe_get_output(int64_t output_idx) override {
30934 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
30935 }
30936 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
30937 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
30938};
30939at::Tensor & wrapper_CPU_special_shifted_chebyshev_polynomial_w_out_out(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
30940structured_special_shifted_chebyshev_polynomial_w_out_out op(out);
30941op.meta(x, n);
30942op.impl(x, n, op.maybe_get_output(0));
30943if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
30944return out;
30945}
30946struct structured_special_spherical_bessel_j0_out_functional final : public at::native::structured_special_spherical_bessel_j0_out {
30947 void set_output_strided(
30948 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30949 TensorOptions options, DimnameList names
30950 ) override {
30951 outputs_[output_idx] = create_out(sizes, strides, options);
30952 if (!names.empty()) {
30953 namedinference::propagate_names(*outputs_[output_idx], names);
30954 }
30955 // super must happen after, so that downstream can use maybe_get_output
30956 // to retrieve the output
30957 at::native::structured_special_spherical_bessel_j0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30958 }
30959 void set_output_raw_strided(
30960 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30961 TensorOptions options, DimnameList names
30962 ) override {
30963 outputs_[output_idx] = create_out(sizes, strides, options);
30964 if (!names.empty()) {
30965 namedinference::propagate_names(*outputs_[output_idx], names);
30966 }
30967 // super must happen after, so that downstream can use maybe_get_output
30968 // to retrieve the output
30969 at::native::structured_special_spherical_bessel_j0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
30970 }
30971 const Tensor& maybe_get_output(int64_t output_idx) override {
30972 return *outputs_[output_idx];
30973 }
30974 std::array<c10::ExclusivelyOwned<Tensor>, 1> outputs_;
30975};
30976at::Tensor wrapper_CPU_special_spherical_bessel_j0(const at::Tensor & x) {
30977structured_special_spherical_bessel_j0_out_functional op;
30978op.meta(x);
30979op.impl(x, *op.outputs_[0]);
30980return std::move(op.outputs_[0]).take();
30981}
30982struct structured_special_spherical_bessel_j0_out_out final : public at::native::structured_special_spherical_bessel_j0_out {
30983 structured_special_spherical_bessel_j0_out_out(Tensor& out0) : outputs_{ std::ref(out0) } {}
30984 void set_output_strided(
30985 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
30986 TensorOptions options, DimnameList names
30987 ) override {
30988 const auto& out = outputs_[output_idx].get();
30989 resize_out(out, sizes, strides, options);
30990 auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
30991 if (C10_UNLIKELY(maybe_proxy.has_value())) {
30992 proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
30993 }
30994 if (!names.empty()) {
30995 namedinference::propagate_names(outputs_[output_idx], names);
30996 }
30997 // super must happen after, so that downstream can use maybe_get_output
30998 // to retrieve the output
30999 at::native::structured_special_spherical_bessel_j0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
31000 }
31001 void set_output_raw_strided(
31002 int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
31003 TensorOptions options, DimnameList names
31004 ) override {
31005 const auto& out = outputs_[output_idx].get();
31006 resize_out(out, sizes, strides, options);
31007 if (!names.empty()) {
31008 namedinference::propagate_names(outputs_[output_idx], names);
31009 }
31010 // super must happen after, so that downstream can use maybe_get_output
31011 // to retrieve the output
31012 at::native::structured_special_spherical_bessel_j0_out::set_output_raw_strided(output_idx, sizes, strides, options, names);
31013 }
31014 const Tensor& maybe_get_output(int64_t output_idx) override {
31015 return proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get();
31016 }
31017 std::array<std::reference_wrapper<Tensor>, 1> outputs_;
31018 std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, 1> proxy_outputs_;
31019};
31020at::Tensor & wrapper_CPU_special_spherical_bessel_j0_out_out(const at::Tensor & x, at::Tensor & out) {
31021structured_special_spherical_bessel_j0_out_out op(out);
31022op.meta(x);
31023op.impl(x, op.maybe_get_output(0));
31024if (op.proxy_outputs_[0].has_value()) op.outputs_[0].get().copy_(**op.proxy_outputs_[0]);
31025return out;
31026}
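// Operators that do not go through the structured-kernel machinery are
// registered via plain wrappers in the anonymous namespace below; these
// forward straight to their at::native implementations, with device checks
// and DeviceGuards emitted only when the operator requests them (both are
// omitted for the wrapper below).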
31027namespace {
31028at::Tensor wrapper_CPU___foobar(const at::Tensor & self, bool arg1, bool arg2, bool arg3) {
31029 // No device check
31030 // DeviceGuard omitted
31031 return at::native::foobar(self, arg1, arg2, arg3);
31032}
31033} // anonymous namespace
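// The TORCH_LIBRARY_IMPL block registers the CPU kernels in this file with
// the dispatcher: each m.impl("<schema>", TORCH_FN(wrapper_CPU_...)) entry
// maps an aten operator overload to its CPU wrapper above. Roughly, a call
// such as
//   at::Tensor y = at::acos(t);   // t is a CPU tensor
// is routed through the "acos" entry to wrapper_CPU_acos, which runs the
// structured meta/impl pair and returns the freshly allocated result.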
31034TORCH_LIBRARY_IMPL(aten, CPU, m) {
31035 m.impl("_assert_async",
31036TORCH_FN(wrapper_CPU___assert_async));
31037m.impl("native_dropout",
31038TORCH_FN(wrapper_CPU__native_dropout));
31039m.impl("native_dropout_backward",
31040TORCH_FN(wrapper_CPU__native_dropout_backward));
31041m.impl("abs.out",
31042TORCH_FN(wrapper_CPU_out_abs_out));
31043m.impl("angle",
31044TORCH_FN(wrapper_CPU__angle));
31045m.impl("angle.out",
31046TORCH_FN(wrapper_CPU_out_angle_out));
31047m.impl("view_as_real",
31048TORCH_FN(wrapper_CPU__view_as_real));
31049m.impl("view_as_complex",
31050TORCH_FN(wrapper_CPU__view_as_complex));
31051m.impl("sgn", TORCH_FN(wrapper_CPU_sgn));
31052m.impl("sgn.out", TORCH_FN(wrapper_CPU_sgn_out_out));
31053m.impl("sgn_", TORCH_FN(wrapper_CPU_sgn_));
31054m.impl("conj_physical.out",
31055TORCH_FN(wrapper_CPU_out_conj_physical_out));
31056m.impl("acos", TORCH_FN(wrapper_CPU_acos));
31057m.impl("acos.out", TORCH_FN(wrapper_CPU_acos_out_out));
31058m.impl("acos_", TORCH_FN(wrapper_CPU_acos_));
31059m.impl("add.Tensor", TORCH_FN(wrapper_CPU_add_Tensor));
31060m.impl("add.out", TORCH_FN(wrapper_CPU_add_out_out));
31061m.impl("add_.Tensor", TORCH_FN(wrapper_CPU_add__Tensor));
31062m.impl("_add_relu.Tensor",
31063TORCH_FN(wrapper_CPU_Tensor__add_relu));
31064m.impl("_add_relu.out",
31065TORCH_FN(wrapper_CPU_out__add_relu_out));
31066m.impl("_add_relu_.Tensor",
31067TORCH_FN(wrapper_CPU_Tensor__add_relu_));
31068m.impl("_add_relu.Scalar",
31069TORCH_FN(wrapper_CPU_Scalar__add_relu));
31070m.impl("_add_relu_.Scalar",
31071TORCH_FN(wrapper_CPU_Scalar__add_relu_));
31072m.impl("addmv", TORCH_FN(wrapper_CPU_addmv));
31073m.impl("addmv.out", TORCH_FN(wrapper_CPU_addmv_out_out));
31074m.impl("addmv_", TORCH_FN(wrapper_CPU_addmv_));
31075m.impl("addr",
31076TORCH_FN(wrapper_CPU__addr));
31077m.impl("addr.out",
31078TORCH_FN(wrapper_CPU_out_addr_out));
31079m.impl("all.dim", TORCH_FN(wrapper_CPU_all_dim));
31080m.impl("all.out", TORCH_FN(wrapper_CPU_all_out_out));
31081m.impl("any.dim", TORCH_FN(wrapper_CPU_any_dim));
31082m.impl("any.out", TORCH_FN(wrapper_CPU_any_out_out));
31083m.impl("arange.start_out",
31084TORCH_FN(wrapper_CPU_start_out_arange_out));
31085m.impl("argmax", TORCH_FN(wrapper_CPU_argmax));
31086m.impl("argmax.out", TORCH_FN(wrapper_CPU_argmax_out_out));
31087m.impl("argmin", TORCH_FN(wrapper_CPU_argmin));
31088m.impl("argmin.out", TORCH_FN(wrapper_CPU_argmin_out_out));
31089m.impl("acosh", TORCH_FN(wrapper_CPU_acosh));
31090m.impl("acosh.out", TORCH_FN(wrapper_CPU_acosh_out_out));
31091m.impl("acosh_", TORCH_FN(wrapper_CPU_acosh_));
31092m.impl("asinh", TORCH_FN(wrapper_CPU_asinh));
31093m.impl("asinh.out", TORCH_FN(wrapper_CPU_asinh_out_out));
31094m.impl("asinh_", TORCH_FN(wrapper_CPU_asinh_));
31095m.impl("atanh", TORCH_FN(wrapper_CPU_atanh));
31096m.impl("atanh.out", TORCH_FN(wrapper_CPU_atanh_out_out));
31097m.impl("atanh_", TORCH_FN(wrapper_CPU_atanh_));
31098m.impl("as_strided",
31099TORCH_FN(wrapper_CPU__as_strided));
31100m.impl("asin", TORCH_FN(wrapper_CPU_asin));
31101m.impl("asin.out", TORCH_FN(wrapper_CPU_asin_out_out));
31102m.impl("asin_", TORCH_FN(wrapper_CPU_asin_));
31103m.impl("atan", TORCH_FN(wrapper_CPU_atan));
31104m.impl("atan.out", TORCH_FN(wrapper_CPU_atan_out_out));
31105m.impl("atan_", TORCH_FN(wrapper_CPU_atan_));
31106m.impl("baddbmm", TORCH_FN(wrapper_CPU_baddbmm));
31107m.impl("baddbmm.out", TORCH_FN(wrapper_CPU_baddbmm_out_out));
31108m.impl("baddbmm_", TORCH_FN(wrapper_CPU_baddbmm_));
31109m.impl("bernoulli.out",
31110TORCH_FN(wrapper_CPU_out_bernoulli_out));
31111m.impl("bernoulli_.Tensor",
31112TORCH_FN(wrapper_CPU_Tensor_bernoulli_));
31113m.impl("bernoulli_.float",
31114TORCH_FN(wrapper_CPU_float_bernoulli_));
31115m.impl("binary_cross_entropy",
31116TORCH_FN(wrapper_CPU__binary_cross_entropy));
31117m.impl("binary_cross_entropy.out",
31118TORCH_FN(wrapper_CPU_out_binary_cross_entropy_out));
31119m.impl("binary_cross_entropy_backward",
31120TORCH_FN(wrapper_CPU__binary_cross_entropy_backward));
31121m.impl("binary_cross_entropy_backward.grad_input",
31122TORCH_FN(wrapper_CPU_grad_input_binary_cross_entropy_backward_out));
31123m.impl("bincount",
31124TORCH_FN(wrapper_CPU__bincount));
31125m.impl("bitwise_not", TORCH_FN(wrapper_CPU_bitwise_not));
31126m.impl("bitwise_not.out", TORCH_FN(wrapper_CPU_bitwise_not_out_out));
31127m.impl("bitwise_not_", TORCH_FN(wrapper_CPU_bitwise_not_));
31128m.impl("copysign.Tensor", TORCH_FN(wrapper_CPU_copysign_Tensor));
31129m.impl("copysign.out", TORCH_FN(wrapper_CPU_copysign_out_out));
31130m.impl("copysign_.Tensor", TORCH_FN(wrapper_CPU_copysign__Tensor));
31131m.impl("logical_not.out",
31132TORCH_FN(wrapper_CPU_out_logical_not_out));
31133m.impl("logical_xor.out",
31134TORCH_FN(wrapper_CPU_out_logical_xor_out));
31135m.impl("logical_and.out",
31136TORCH_FN(wrapper_CPU_out_logical_and_out));
31137m.impl("logical_or.out",
31138TORCH_FN(wrapper_CPU_out_logical_or_out));
31139m.impl("bmm", TORCH_FN(wrapper_CPU_bmm));
31140m.impl("bmm.out", TORCH_FN(wrapper_CPU_bmm_out_out));
31141m.impl("cat", TORCH_FN(wrapper_CPU_cat));
31142m.impl("cat.out", TORCH_FN(wrapper_CPU_cat_out_out));
31143m.impl("ceil", TORCH_FN(wrapper_CPU_ceil));
31144m.impl("ceil.out", TORCH_FN(wrapper_CPU_ceil_out_out));
31145m.impl("ceil_", TORCH_FN(wrapper_CPU_ceil_));
31146m.impl("clamp", TORCH_FN(wrapper_CPU_clamp));
31147m.impl("clamp.out", TORCH_FN(wrapper_CPU_clamp_out_out));
31148m.impl("clamp_", TORCH_FN(wrapper_CPU_clamp_));
31149m.impl("clamp.Tensor", TORCH_FN(wrapper_CPU_clamp_Tensor));
31150m.impl("clamp.Tensor_out", TORCH_FN(wrapper_CPU_clamp_out_Tensor_out));
31151m.impl("clamp_.Tensor", TORCH_FN(wrapper_CPU_clamp__Tensor));
31152m.impl("clamp_max", TORCH_FN(wrapper_CPU_clamp_max));
31153m.impl("clamp_max.out", TORCH_FN(wrapper_CPU_clamp_max_out_out));
31154m.impl("clamp_max_", TORCH_FN(wrapper_CPU_clamp_max_));
31155m.impl("clamp_max.Tensor", TORCH_FN(wrapper_CPU_clamp_max_Tensor));
31156m.impl("clamp_max.Tensor_out", TORCH_FN(wrapper_CPU_clamp_max_out_Tensor_out));
31157m.impl("clamp_max_.Tensor", TORCH_FN(wrapper_CPU_clamp_max__Tensor));
31158m.impl("clamp_min", TORCH_FN(wrapper_CPU_clamp_min));
31159m.impl("clamp_min.out", TORCH_FN(wrapper_CPU_clamp_min_out_out));
31160m.impl("clamp_min_", TORCH_FN(wrapper_CPU_clamp_min_));
31161m.impl("clamp_min.Tensor", TORCH_FN(wrapper_CPU_clamp_min_Tensor));
31162m.impl("clamp_min.Tensor_out", TORCH_FN(wrapper_CPU_clamp_min_out_Tensor_out));
31163m.impl("clamp_min_.Tensor", TORCH_FN(wrapper_CPU_clamp_min__Tensor));
31164m.impl("complex.out",
31165TORCH_FN(wrapper_CPU_out_complex_out));
31166m.impl("polar.out",
31167TORCH_FN(wrapper_CPU_out_polar_out));
31168m.impl("cos", TORCH_FN(wrapper_CPU_cos));
31169m.impl("cos.out", TORCH_FN(wrapper_CPU_cos_out_out));
31170m.impl("cos_", TORCH_FN(wrapper_CPU_cos_));
31171m.impl("cosh", TORCH_FN(wrapper_CPU_cosh));
31172m.impl("cosh.out", TORCH_FN(wrapper_CPU_cosh_out_out));
31173m.impl("cosh_", TORCH_FN(wrapper_CPU_cosh_));
31174m.impl("count_nonzero.dim_IntList",
31175TORCH_FN(wrapper_CPU_dim_IntList_count_nonzero));
31176m.impl("_cummax_helper",
31177TORCH_FN(wrapper_CPU___cummax_helper));
31178m.impl("_cummin_helper",
31179TORCH_FN(wrapper_CPU___cummin_helper));
31180m.impl("cumprod", TORCH_FN(wrapper_CPU_cumprod));
31181m.impl("cumprod.out", TORCH_FN(wrapper_CPU_cumprod_out_out));
31182m.impl("cumprod_", TORCH_FN(wrapper_CPU_cumprod_));
31183m.impl("cumsum", TORCH_FN(wrapper_CPU_cumsum));
31184m.impl("cumsum.out", TORCH_FN(wrapper_CPU_cumsum_out_out));
31185m.impl("cumsum_", TORCH_FN(wrapper_CPU_cumsum_));
31186m.impl("_ctc_loss",
31187TORCH_FN(wrapper_CPU___ctc_loss));
31188m.impl("_ctc_loss.Tensor",
31189TORCH_FN(wrapper_CPU_Tensor__ctc_loss));
31190m.impl("_ctc_loss_backward",
31191TORCH_FN(wrapper_CPU___ctc_loss_backward));
31192m.impl("_ctc_loss_backward.Tensor",
31193TORCH_FN(wrapper_CPU_Tensor__ctc_loss_backward));
31194m.impl("div.Tensor", TORCH_FN(wrapper_CPU_div_Tensor));
31195m.impl("div.out", TORCH_FN(wrapper_CPU_div_out_out));
31196m.impl("div_.Tensor", TORCH_FN(wrapper_CPU_div__Tensor));
31197m.impl("div.Tensor_mode", TORCH_FN(wrapper_CPU_div_Tensor_mode));
31198m.impl("div.out_mode", TORCH_FN(wrapper_CPU_div_out_out_mode));
31199m.impl("div_.Tensor_mode", TORCH_FN(wrapper_CPU_div__Tensor_mode));
31200m.impl("dot",
31201TORCH_FN(wrapper_CPU__dot));
31202m.impl("vdot",
31203TORCH_FN(wrapper_CPU__vdot));
31204m.impl("embedding_dense_backward",
31205TORCH_FN(wrapper_CPU__embedding_dense_backward));
31206m.impl("embedding_renorm_",
31207TORCH_FN(wrapper_CPU__embedding_renorm_));
31208m.impl("_embedding_bag_forward_only",
31209TORCH_FN(wrapper_CPU___embedding_bag_forward_only));
31210m.impl("_embedding_bag",
31211TORCH_FN(wrapper_CPU___embedding_bag));
31212m.impl("_embedding_bag_dense_backward",
31213TORCH_FN(wrapper_CPU___embedding_bag_dense_backward));
31214m.impl("_embedding_bag_per_sample_weights_backward",
31215TORCH_FN(wrapper_CPU___embedding_bag_per_sample_weights_backward));
31216m.impl("empty.memory_format",
31217TORCH_FN(wrapper_CPU_memory_format_empty));
31218m.impl("_empty_affine_quantized",
31219TORCH_FN(wrapper_CPU___empty_affine_quantized));
31220m.impl("_empty_per_channel_affine_quantized",
31221TORCH_FN(wrapper_CPU___empty_per_channel_affine_quantized));
31222m.impl("resize_",
31223TORCH_FN(wrapper_CPU__resize_));
31224m.impl("empty_strided",
31225TORCH_FN(wrapper_CPU__empty_strided));
31226m.impl("erf", TORCH_FN(wrapper_CPU_erf));
31227m.impl("erf.out", TORCH_FN(wrapper_CPU_erf_out_out));
31228m.impl("erf_", TORCH_FN(wrapper_CPU_erf_));
31229m.impl("erfc", TORCH_FN(wrapper_CPU_erfc));
31230m.impl("erfc.out", TORCH_FN(wrapper_CPU_erfc_out_out));
31231m.impl("erfc_", TORCH_FN(wrapper_CPU_erfc_));
31232m.impl("exp", TORCH_FN(wrapper_CPU_exp));
31233m.impl("exp.out", TORCH_FN(wrapper_CPU_exp_out_out));
31234m.impl("exp_", TORCH_FN(wrapper_CPU_exp_));
31235m.impl("exp2", TORCH_FN(wrapper_CPU_exp2));
31236m.impl("exp2.out", TORCH_FN(wrapper_CPU_exp2_out_out));
31237m.impl("exp2_", TORCH_FN(wrapper_CPU_exp2_));
31238m.impl("expm1", TORCH_FN(wrapper_CPU_expm1));
31239m.impl("expm1.out", TORCH_FN(wrapper_CPU_expm1_out_out));
31240m.impl("expm1_", TORCH_FN(wrapper_CPU_expm1_));
31241m.impl("eye.out",
31242TORCH_FN(wrapper_CPU_out_eye_out));
31243m.impl("eye.m_out",
31244TORCH_FN(wrapper_CPU_m_out_eye_out));
31245m.impl("fill_.Scalar",
31246TORCH_FN(wrapper_CPU_Scalar_fill_));
31247m.impl("fill_.Tensor",
31248TORCH_FN(wrapper_CPU_Tensor_fill_));
31249m.impl("floor", TORCH_FN(wrapper_CPU_floor));
31250m.impl("floor.out", TORCH_FN(wrapper_CPU_floor_out_out));
31251m.impl("floor_", TORCH_FN(wrapper_CPU_floor_));
31252m.impl("floor_divide",
31253TORCH_FN(wrapper_CPU__floor_divide));
31254m.impl("floor_divide.out",
31255TORCH_FN(wrapper_CPU_out_floor_divide_out));
31256m.impl("floor_divide_.Tensor",
31257TORCH_FN(wrapper_CPU_Tensor_floor_divide_));
31258m.impl("frac", TORCH_FN(wrapper_CPU_frac));
31259m.impl("frac.out", TORCH_FN(wrapper_CPU_frac_out_out));
31260m.impl("frac_", TORCH_FN(wrapper_CPU_frac_));
31261m.impl("from_file",
31262TORCH_FN(wrapper_CPU__from_file));
31263m.impl("gcd", TORCH_FN(wrapper_CPU_gcd));
31264m.impl("gcd.out", TORCH_FN(wrapper_CPU_gcd_out_out));
31265m.impl("gcd_", TORCH_FN(wrapper_CPU_gcd_));
31266m.impl("lcm", TORCH_FN(wrapper_CPU_lcm));
31267m.impl("lcm.out", TORCH_FN(wrapper_CPU_lcm_out_out));
31268m.impl("lcm_", TORCH_FN(wrapper_CPU_lcm_));
31269m.impl("grid_sampler_2d",
31270TORCH_FN(wrapper_CPU__grid_sampler_2d));
31271m.impl("grid_sampler_2d_backward",
31272TORCH_FN(wrapper_CPU__grid_sampler_2d_backward));
31273m.impl("grid_sampler_3d",
31274TORCH_FN(wrapper_CPU__grid_sampler_3d));
31275m.impl("grid_sampler_3d_backward",
31276TORCH_FN(wrapper_CPU__grid_sampler_3d_backward));
31277m.impl("native_group_norm",
31278TORCH_FN(wrapper_CPU__native_group_norm));
31279m.impl("native_group_norm_backward",
31280TORCH_FN(wrapper_CPU__native_group_norm_backward));
31281m.impl("_fft_r2c",
31282TORCH_FN(wrapper_CPU___fft_r2c));
31283m.impl("_fft_r2c.out",
31284TORCH_FN(wrapper_CPU_out__fft_r2c_out));
31285m.impl("_fft_c2r",
31286TORCH_FN(wrapper_CPU___fft_c2r));
31287m.impl("_fft_c2r.out",
31288TORCH_FN(wrapper_CPU_out__fft_c2r_out));
31289m.impl("_fft_c2c",
31290TORCH_FN(wrapper_CPU___fft_c2c));
31291m.impl("_fft_c2c.out",
31292TORCH_FN(wrapper_CPU_out__fft_c2c_out));
31293m.impl("_validate_compressed_sparse_indices",
31294TORCH_FN(wrapper_CPU___validate_compressed_sparse_indices));
31295m.impl("index.Tensor", TORCH_FN(wrapper_CPU_index_Tensor));
31296m.impl("index.Tensor_out", TORCH_FN(wrapper_CPU_index_out_Tensor_out));
31297m.impl("index_copy", TORCH_FN(wrapper_CPU_index_copy));
31298m.impl("index_copy.out", TORCH_FN(wrapper_CPU_index_copy_out_out));
31299m.impl("index_copy_", TORCH_FN(wrapper_CPU_index_copy_));
31300m.impl("_index_put_impl_",
31301TORCH_FN(wrapper_CPU___index_put_impl_));
31302m.impl("isin.Tensor_Tensor", TORCH_FN(wrapper_CPU_isin_Tensor_Tensor));
31303m.impl("isin.Tensor_Tensor_out", TORCH_FN(wrapper_CPU_isin_out_Tensor_Tensor_out));
31304m.impl("isin.Tensor_Scalar", TORCH_FN(wrapper_CPU_isin_Tensor_Scalar));
31305m.impl("isin.Tensor_Scalar_out", TORCH_FN(wrapper_CPU_isin_out_Tensor_Scalar_out));
31306m.impl("isin.Scalar_Tensor", TORCH_FN(wrapper_CPU_isin_Scalar_Tensor));
31307m.impl("isin.Scalar_Tensor_out", TORCH_FN(wrapper_CPU_isin_out_Scalar_Tensor_out));
31308m.impl("isnan",
31309TORCH_FN(wrapper_CPU__isnan));
31310m.impl("kthvalue.values",
31311TORCH_FN(wrapper_CPU_values_kthvalue_out));
31312m.impl("native_layer_norm",
31313TORCH_FN(wrapper_CPU__native_layer_norm));
31314m.impl("native_layer_norm_backward",
31315TORCH_FN(wrapper_CPU__native_layer_norm_backward));
31316m.impl("nan_to_num.out",
31317TORCH_FN(wrapper_CPU_out_nan_to_num_out));
31318m.impl("linspace.out",
31319TORCH_FN(wrapper_CPU_out_linspace_out));
31320m.impl("log", TORCH_FN(wrapper_CPU_log));
31321m.impl("log.out", TORCH_FN(wrapper_CPU_log_out_out));
31322m.impl("log_", TORCH_FN(wrapper_CPU_log_));
31323m.impl("log10", TORCH_FN(wrapper_CPU_log10));
31324m.impl("log10.out", TORCH_FN(wrapper_CPU_log10_out_out));
31325m.impl("log10_", TORCH_FN(wrapper_CPU_log10_));
31326m.impl("log1p", TORCH_FN(wrapper_CPU_log1p));
31327m.impl("log1p.out", TORCH_FN(wrapper_CPU_log1p_out_out));
31328m.impl("log1p_", TORCH_FN(wrapper_CPU_log1p_));
31329m.impl("log2", TORCH_FN(wrapper_CPU_log2));
31330m.impl("log2.out", TORCH_FN(wrapper_CPU_log2_out_out));
31331m.impl("log2_", TORCH_FN(wrapper_CPU_log2_));
31332m.impl("logaddexp", TORCH_FN(wrapper_CPU_logaddexp));
31333m.impl("logaddexp.out", TORCH_FN(wrapper_CPU_logaddexp_out_out));
31334m.impl("logaddexp2", TORCH_FN(wrapper_CPU_logaddexp2));
31335m.impl("logaddexp2.out", TORCH_FN(wrapper_CPU_logaddexp2_out_out));
31336m.impl("xlogy.Tensor", TORCH_FN(wrapper_CPU_xlogy_Tensor));
31337m.impl("xlogy.OutTensor", TORCH_FN(wrapper_CPU_xlogy_out_OutTensor));
31338m.impl("xlogy_.Tensor", TORCH_FN(wrapper_CPU_xlogy__Tensor));
31339m.impl("logspace.out",
31340TORCH_FN(wrapper_CPU_out_logspace_out));
31341m.impl("_log_softmax", TORCH_FN(wrapper_CPU__log_softmax));
31342m.impl("_log_softmax.out", TORCH_FN(wrapper_CPU__log_softmax_out_out));
31343m.impl("_log_softmax_backward_data", TORCH_FN(wrapper_CPU__log_softmax_backward_data));
31344m.impl("_log_softmax_backward_data.out", TORCH_FN(wrapper_CPU__log_softmax_backward_data_out_out));
31345m.impl("_logcumsumexp",
31346TORCH_FN(wrapper_CPU___logcumsumexp));
31347m.impl("_logcumsumexp.out",
31348TORCH_FN(wrapper_CPU_out__logcumsumexp_out));
31349m.impl("_aminmax",
31350TORCH_FN(wrapper_CPU___aminmax));
31351m.impl("_aminmax.dim",
31352TORCH_FN(wrapper_CPU_dim__aminmax));
31353m.impl("aminmax", TORCH_FN(wrapper_CPU_aminmax));
31354m.impl("aminmax.out", TORCH_FN(wrapper_CPU_aminmax_out_out));
31355m.impl("_compute_linear_combination",
31356TORCH_FN(wrapper_CPU___compute_linear_combination));
31357m.impl("_compute_linear_combination.out",
31358TORCH_FN(wrapper_CPU_out__compute_linear_combination_out));
31359m.impl("max.dim", TORCH_FN(wrapper_CPU_max_dim));
31360m.impl("max.dim_max", TORCH_FN(wrapper_CPU_max_out_dim_max));
31361m.impl("amax", TORCH_FN(wrapper_CPU_amax));
31362m.impl("amax.out", TORCH_FN(wrapper_CPU_amax_out_out));
31363m.impl("mean.dim", TORCH_FN(wrapper_CPU_mean_dim));
31364m.impl("mean.out", TORCH_FN(wrapper_CPU_mean_out_out));
31365m.impl("median",
31366TORCH_FN(wrapper_CPU__median));
31367m.impl("median.dim_values",
31368TORCH_FN(wrapper_CPU_dim_values_median_out));
31369m.impl("nanmedian",
31370TORCH_FN(wrapper_CPU__nanmedian));
31371m.impl("nanmedian.dim_values",
31372TORCH_FN(wrapper_CPU_dim_values_nanmedian_out));
31373m.impl("min.dim", TORCH_FN(wrapper_CPU_min_dim));
31374m.impl("min.dim_min", TORCH_FN(wrapper_CPU_min_out_dim_min));
31375m.impl("amin", TORCH_FN(wrapper_CPU_amin));
31376m.impl("amin.out", TORCH_FN(wrapper_CPU_amin_out_out));
31377m.impl("mkldnn_rnn_layer",
31378TORCH_FN(wrapper_CPU__mkldnn_rnn_layer));
31379m.impl("mkldnn_rnn_layer_backward",
31380TORCH_FN(wrapper_CPU__mkldnn_rnn_layer_backward));
31381m.impl("mm", TORCH_FN(wrapper_CPU_mm));
31382m.impl("mm.out", TORCH_FN(wrapper_CPU_mm_out_out));
31383m.impl("mode",
31384TORCH_FN(wrapper_CPU__mode));
31385m.impl("mul.Tensor", TORCH_FN(wrapper_CPU_mul_Tensor));
31386m.impl("mul.out", TORCH_FN(wrapper_CPU_mul_out_out));
31387m.impl("mul_.Tensor", TORCH_FN(wrapper_CPU_mul__Tensor));
31388m.impl("mvlgamma.out",
31389TORCH_FN(wrapper_CPU_out_mvlgamma_out));
31390m.impl("narrow_copy",
31391TORCH_FN(wrapper_CPU__narrow_copy));
31392m.impl("narrow_copy.out",
31393TORCH_FN(wrapper_CPU_out_narrow_copy_out));
31394m.impl("native_batch_norm",
31395TORCH_FN(wrapper_CPU__native_batch_norm));
31396m.impl("native_batch_norm.out",
31397TORCH_FN(wrapper_CPU_out_native_batch_norm_out));
31398m.impl("_native_batch_norm_legit.out",
31399TORCH_FN(wrapper_CPU_out__native_batch_norm_legit_out));
31400m.impl("_native_batch_norm_legit",
31401TORCH_FN(wrapper_CPU___native_batch_norm_legit));
31402m.impl("_native_batch_norm_legit.no_stats",
31403TORCH_FN(wrapper_CPU_no_stats__native_batch_norm_legit));
31404m.impl("_native_batch_norm_legit.no_stats_out",
31405TORCH_FN(wrapper_CPU_no_stats_out__native_batch_norm_legit_out));
31406m.impl("native_batch_norm_backward",
31407TORCH_FN(wrapper_CPU__native_batch_norm_backward));
31408m.impl("batch_norm_update_stats",
31409TORCH_FN(wrapper_CPU__batch_norm_update_stats));
31410m.impl("_cdist_forward",
31411TORCH_FN(wrapper_CPU___cdist_forward));
31412m.impl("_cdist_backward",
31413TORCH_FN(wrapper_CPU___cdist_backward));
31414m.impl("_pdist_forward",
31415TORCH_FN(wrapper_CPU___pdist_forward));
31416m.impl("_pdist_backward",
31417TORCH_FN(wrapper_CPU___pdist_backward));
31418m.impl("pixel_shuffle",
31419TORCH_FN(wrapper_CPU__pixel_shuffle));
31420m.impl("pixel_unshuffle",
31421TORCH_FN(wrapper_CPU__pixel_unshuffle));
31422m.impl("channel_shuffle",
31423TORCH_FN(wrapper_CPU__channel_shuffle));
31424m.impl("native_channel_shuffle",
31425TORCH_FN(wrapper_CPU__native_channel_shuffle));
31426m.impl("randperm.generator_out",
31427TORCH_FN(wrapper_CPU_generator_out_randperm_out));
31428m.impl("range.out",
31429TORCH_FN(wrapper_CPU_out_range_out));
31430m.impl("reciprocal", TORCH_FN(wrapper_CPU_reciprocal));
31431m.impl("reciprocal.out", TORCH_FN(wrapper_CPU_reciprocal_out_out));
31432m.impl("reciprocal_", TORCH_FN(wrapper_CPU_reciprocal_));
31433m.impl("neg", TORCH_FN(wrapper_CPU_neg));
31434m.impl("neg.out", TORCH_FN(wrapper_CPU_neg_out_out));
31435m.impl("neg_", TORCH_FN(wrapper_CPU_neg_));
31436m.impl("repeat_interleave.Tensor",
31437TORCH_FN(wrapper_CPU_Tensor_repeat_interleave));
31438m.impl("_reshape_alias",
31439TORCH_FN(wrapper_CPU___reshape_alias));
31440m.impl("round", TORCH_FN(wrapper_CPU_round));
31441m.impl("round.out", TORCH_FN(wrapper_CPU_round_out_out));
31442m.impl("round_", TORCH_FN(wrapper_CPU_round_));
31443m.impl("round.decimals", TORCH_FN(wrapper_CPU_round_decimals));
31444m.impl("round.decimals_out", TORCH_FN(wrapper_CPU_round_out_decimals_out));
31445m.impl("round_.decimals", TORCH_FN(wrapper_CPU_round__decimals));
31446m.impl("relu",
31447TORCH_FN(wrapper_CPU__relu));
31448m.impl("relu_",
31449TORCH_FN(wrapper_CPU__relu_));
31450m.impl("_prelu_kernel",
31451TORCH_FN(wrapper_CPU___prelu_kernel));
31452m.impl("_prelu_kernel_backward",
31453TORCH_FN(wrapper_CPU___prelu_kernel_backward));
31454m.impl("gelu", TORCH_FN(wrapper_CPU_gelu));
31455m.impl("gelu.out", TORCH_FN(wrapper_CPU_gelu_out_out));
31456m.impl("gelu_", TORCH_FN(wrapper_CPU_gelu_));
31457m.impl("gelu_backward", TORCH_FN(wrapper_CPU_gelu_backward));
31458m.impl("gelu_backward.grad_input", TORCH_FN(wrapper_CPU_gelu_backward_out_grad_input));
31459m.impl("hardshrink", TORCH_FN(wrapper_CPU_hardshrink));
31460m.impl("hardshrink.out", TORCH_FN(wrapper_CPU_hardshrink_out_out));
31461m.impl("hardshrink_backward", TORCH_FN(wrapper_CPU_hardshrink_backward));
31462m.impl("hardshrink_backward.grad_input", TORCH_FN(wrapper_CPU_hardshrink_backward_out_grad_input));
31463m.impl("rsqrt", TORCH_FN(wrapper_CPU_rsqrt));
31464m.impl("rsqrt.out", TORCH_FN(wrapper_CPU_rsqrt_out_out));
31465m.impl("rsqrt_", TORCH_FN(wrapper_CPU_rsqrt_));
31466m.impl("silu", TORCH_FN(wrapper_CPU_silu));
31467m.impl("silu.out", TORCH_FN(wrapper_CPU_silu_out_out));
31468m.impl("silu_", TORCH_FN(wrapper_CPU_silu_));
31469m.impl("silu_backward", TORCH_FN(wrapper_CPU_silu_backward));
31470m.impl("silu_backward.grad_input", TORCH_FN(wrapper_CPU_silu_backward_out_grad_input));
31471m.impl("mish", TORCH_FN(wrapper_CPU_mish));
31472m.impl("mish.out", TORCH_FN(wrapper_CPU_mish_out_out));
31473m.impl("mish_", TORCH_FN(wrapper_CPU_mish_));
31474m.impl("mish_backward",
31475TORCH_FN(wrapper_CPU__mish_backward));
31476m.impl("sigmoid", TORCH_FN(wrapper_CPU_sigmoid));
31477m.impl("sigmoid.out", TORCH_FN(wrapper_CPU_sigmoid_out_out));
31478m.impl("sigmoid_", TORCH_FN(wrapper_CPU_sigmoid_));
31479m.impl("logit",
31480TORCH_FN(wrapper_CPU__logit));
31481m.impl("logit.out",
31482TORCH_FN(wrapper_CPU_out_logit_out));
31483m.impl("logit_",
31484TORCH_FN(wrapper_CPU__logit_));
31485m.impl("sin", TORCH_FN(wrapper_CPU_sin));
31486m.impl("sin.out", TORCH_FN(wrapper_CPU_sin_out_out));
31487m.impl("sin_", TORCH_FN(wrapper_CPU_sin_));
31488m.impl("sinc", TORCH_FN(wrapper_CPU_sinc));
31489m.impl("sinc.out", TORCH_FN(wrapper_CPU_sinc_out_out));
31490m.impl("sinc_", TORCH_FN(wrapper_CPU_sinc_));
31491m.impl("sinh", TORCH_FN(wrapper_CPU_sinh));
31492m.impl("sinh.out", TORCH_FN(wrapper_CPU_sinh_out_out));
31493m.impl("sinh_", TORCH_FN(wrapper_CPU_sinh_));
31494m.impl("_softmax", TORCH_FN(wrapper_CPU__softmax));
31495m.impl("_softmax.out", TORCH_FN(wrapper_CPU__softmax_out_out));
31496m.impl("_softmax_backward_data", TORCH_FN(wrapper_CPU__softmax_backward_data));
31497m.impl("_softmax_backward_data.out", TORCH_FN(wrapper_CPU__softmax_backward_data_out_out));
31498m.impl("sspaddmm.out",
31499TORCH_FN(wrapper_CPU_out_sspaddmm_out));
31500m.impl("_stack",
31501TORCH_FN(wrapper_CPU___stack));
31502m.impl("_stack.out",
31503TORCH_FN(wrapper_CPU_out__stack_out));
31504m.impl("sum.dim_IntList", TORCH_FN(wrapper_CPU_sum_dim_IntList));
31505m.impl("sum.IntList_out", TORCH_FN(wrapper_CPU_sum_out_IntList_out));
31506m.impl("nansum",
31507TORCH_FN(wrapper_CPU__nansum));
31508m.impl("nansum.out",
31509TORCH_FN(wrapper_CPU_out_nansum_out));
31510m.impl("sqrt", TORCH_FN(wrapper_CPU_sqrt));
31511m.impl("sqrt.out", TORCH_FN(wrapper_CPU_sqrt_out_out));
31512m.impl("sqrt_", TORCH_FN(wrapper_CPU_sqrt_));
31513m.impl("std.correction",
31514TORCH_FN(wrapper_CPU_correction_std));
31515m.impl("std.correction_out",
31516TORCH_FN(wrapper_CPU_correction_out_std_out));
31517m.impl("std_mean.correction",
31518TORCH_FN(wrapper_CPU_correction_std_mean));
31519m.impl("prod",
31520TORCH_FN(wrapper_CPU__prod));
31521m.impl("prod.dim_int", TORCH_FN(wrapper_CPU_prod_dim_int));
31522m.impl("prod.int_out", TORCH_FN(wrapper_CPU_prod_out_int_out));
31523m.impl("tan", TORCH_FN(wrapper_CPU_tan));
31524m.impl("tan.out", TORCH_FN(wrapper_CPU_tan_out_out));
31525m.impl("tan_", TORCH_FN(wrapper_CPU_tan_));
31526m.impl("tanh", TORCH_FN(wrapper_CPU_tanh));
31527m.impl("tanh.out", TORCH_FN(wrapper_CPU_tanh_out_out));
31528m.impl("tanh_", TORCH_FN(wrapper_CPU_tanh_));
31529m.impl("tensordot.out",
31530TORCH_FN(wrapper_CPU_out_tensordot_out));
31531m.impl("threshold", TORCH_FN(wrapper_CPU_threshold));
31532m.impl("threshold.out", TORCH_FN(wrapper_CPU_threshold_out_out));
31533m.impl("threshold_", TORCH_FN(wrapper_CPU_threshold_));
31534m.impl("threshold_backward", TORCH_FN(wrapper_CPU_threshold_backward));
31535m.impl("threshold_backward.grad_input", TORCH_FN(wrapper_CPU_threshold_backward_out_grad_input));
31536m.impl("flip",
31537TORCH_FN(wrapper_CPU__flip));
31538m.impl("roll",
31539TORCH_FN(wrapper_CPU__roll));
31540m.impl("_transform_bias_rescale_qkv",
31541TORCH_FN(wrapper_CPU___transform_bias_rescale_qkv));
31542m.impl("_nested_tensor_from_mask",
31543TORCH_FN(wrapper_CPU___nested_tensor_from_mask));
31544m.impl("_nested_tensor_from_mask_left_aligned",
31545TORCH_FN(wrapper_CPU___nested_tensor_from_mask_left_aligned));
31546m.impl("_nested_from_padded",
31547TORCH_FN(wrapper_CPU___nested_from_padded));
31548m.impl("_nested_view_from_buffer",
31549TORCH_FN(wrapper_CPU___nested_view_from_buffer));
31550m.impl("trunc", TORCH_FN(wrapper_CPU_trunc));
31551m.impl("trunc.out", TORCH_FN(wrapper_CPU_trunc_out_out));
31552m.impl("trunc_", TORCH_FN(wrapper_CPU_trunc_));
31553m.impl("_unique",
31554TORCH_FN(wrapper_CPU___unique));
31555m.impl("unique_dim",
31556TORCH_FN(wrapper_CPU__unique_dim));
31557m.impl("unique_consecutive",
31558TORCH_FN(wrapper_CPU__unique_consecutive));
31559m.impl("unique_dim_consecutive",
31560TORCH_FN(wrapper_CPU__unique_dim_consecutive));
31561m.impl("_unique2",
31562TORCH_FN(wrapper_CPU___unique2));
31563m.impl("var.correction",
31564TORCH_FN(wrapper_CPU_correction_var));
31565m.impl("var.correction_out",
31566TORCH_FN(wrapper_CPU_correction_out_var_out));
31567m.impl("var_mean.correction",
31568TORCH_FN(wrapper_CPU_correction_var_mean));
31569m.impl("where.self",
31570TORCH_FN(wrapper_CPU_self_where));
31571m.impl("where.self_out",
31572TORCH_FN(wrapper_CPU_self_out_where_out));
31573m.impl("_weight_norm_interface",
31574TORCH_FN(wrapper_CPU___weight_norm_interface));
31575m.impl("_weight_norm_interface_backward",
31576TORCH_FN(wrapper_CPU___weight_norm_interface_backward));
31577m.impl("_efficientzerotensor",
31578TORCH_FN(wrapper_CPU___efficientzerotensor));
31579m.impl("_standard_gamma_grad",
31580TORCH_FN(wrapper_CPU___standard_gamma_grad));
31581m.impl("_standard_gamma",
31582TORCH_FN(wrapper_CPU___standard_gamma));
31583m.impl("_dirichlet_grad",
31584TORCH_FN(wrapper_CPU___dirichlet_grad));
31585m.impl("_sample_dirichlet",
31586TORCH_FN(wrapper_CPU___sample_dirichlet));
31587m.impl("poisson",
31588TORCH_FN(wrapper_CPU__poisson));
31589m.impl("binomial",
31590TORCH_FN(wrapper_CPU__binomial));
31591m.impl("_spdiags",
31592TORCH_FN(wrapper_CPU___spdiags));
31593m.impl("norm.ScalarOpt_dim_dtype", TORCH_FN(wrapper_CPU_norm_ScalarOpt_dim_dtype));
31594m.impl("norm.dtype_out", TORCH_FN(wrapper_CPU_norm_out_dtype_out));
31595m.impl("norm.ScalarOpt_dim", TORCH_FN(wrapper_CPU_norm_ScalarOpt_dim));
31596m.impl("norm.out", TORCH_FN(wrapper_CPU_norm_out_out));
31597m.impl("frexp.Tensor_out",
31598TORCH_FN(wrapper_CPU_Tensor_out_frexp_out));
31599m.impl("zero_",
31600TORCH_FN(wrapper_CPU__zero_));
31601m.impl("sub.Tensor", TORCH_FN(wrapper_CPU_sub_Tensor));
31602m.impl("sub.out", TORCH_FN(wrapper_CPU_sub_out_out));
31603m.impl("sub_.Tensor", TORCH_FN(wrapper_CPU_sub__Tensor));
31604m.impl("rsub.Tensor",
31605TORCH_FN(wrapper_CPU_Tensor_rsub));
31606m.impl("heaviside", TORCH_FN(wrapper_CPU_heaviside));
31607m.impl("heaviside.out", TORCH_FN(wrapper_CPU_heaviside_out_out));
31608m.impl("heaviside_", TORCH_FN(wrapper_CPU_heaviside_));
31609m.impl("addmm", TORCH_FN(wrapper_CPU_addmm));
31610m.impl("addmm.out", TORCH_FN(wrapper_CPU_addmm_out_out));
31611m.impl("addmm_", TORCH_FN(wrapper_CPU_addmm_));
31612m.impl("_addmm_activation", TORCH_FN(wrapper_CPU__addmm_activation));
31613m.impl("_addmm_activation.out", TORCH_FN(wrapper_CPU__addmm_activation_out_out));
31614m.impl("sparse_dim",
31615TORCH_FN(wrapper_CPU__sparse_dim));
31616m.impl("dense_dim",
31617TORCH_FN(wrapper_CPU__dense_dim));
31618m.impl("to_sparse.sparse_dim",
31619TORCH_FN(wrapper_CPU_sparse_dim_to_sparse));
31620m.impl("to_sparse",
31621TORCH_FN(wrapper_CPU__to_sparse));
31622m.impl("to_sparse_csr",
31623TORCH_FN(wrapper_CPU__to_sparse_csr));
31624m.impl("to_sparse_csc",
31625TORCH_FN(wrapper_CPU__to_sparse_csc));
31626m.impl("to_sparse_bsr",
31627TORCH_FN(wrapper_CPU__to_sparse_bsr));
31628m.impl("to_sparse_bsc",
31629TORCH_FN(wrapper_CPU__to_sparse_bsc));
31630m.impl("to_mkldnn",
31631TORCH_FN(wrapper_CPU__to_mkldnn));
31632m.impl("quantize_per_tensor_dynamic",
31633TORCH_FN(wrapper_CPU__quantize_per_tensor_dynamic));
31634m.impl("quantize_per_tensor",
31635TORCH_FN(wrapper_CPU__quantize_per_tensor));
31636m.impl("quantize_per_tensor.tensor_qparams",
31637TORCH_FN(wrapper_CPU_tensor_qparams_quantize_per_tensor));
31638m.impl("quantize_per_tensor.tensors",
31639TORCH_FN(wrapper_CPU_tensors_quantize_per_tensor));
31640m.impl("quantize_per_channel",
31641TORCH_FN(wrapper_CPU__quantize_per_channel));
31642m.impl("dequantize.self",
31643TORCH_FN(wrapper_CPU_self_dequantize));
31644m.impl("_make_per_tensor_quantized_tensor",
31645TORCH_FN(wrapper_CPU___make_per_tensor_quantized_tensor));
31646m.impl("_make_per_channel_quantized_tensor",
31647TORCH_FN(wrapper_CPU___make_per_channel_quantized_tensor));
31648m.impl("fake_quantize_per_tensor_affine_cachemask",
31649TORCH_FN(wrapper_CPU__fake_quantize_per_tensor_affine_cachemask));
31650m.impl("_fake_quantize_per_tensor_affine_cachemask_tensor_qparams",
31651TORCH_FN(wrapper_CPU___fake_quantize_per_tensor_affine_cachemask_tensor_qparams));
31652m.impl("_fake_quantize_learnable_per_tensor_affine",
31653TORCH_FN(wrapper_CPU___fake_quantize_learnable_per_tensor_affine));
31654m.impl("_fake_quantize_learnable_per_tensor_affine_backward",
31655TORCH_FN(wrapper_CPU___fake_quantize_learnable_per_tensor_affine_backward));
31656m.impl("fake_quantize_per_channel_affine_cachemask",
31657TORCH_FN(wrapper_CPU__fake_quantize_per_channel_affine_cachemask));
31658m.impl("_fake_quantize_learnable_per_channel_affine",
31659TORCH_FN(wrapper_CPU___fake_quantize_learnable_per_channel_affine));
31660m.impl("_fake_quantize_learnable_per_channel_affine_backward",
31661TORCH_FN(wrapper_CPU___fake_quantize_learnable_per_channel_affine_backward));
31662m.impl("_fused_moving_avg_obs_fq_helper",
31663TORCH_FN(wrapper_CPU___fused_moving_avg_obs_fq_helper));
31664m.impl("_local_scalar_dense",
31665TORCH_FN(wrapper_CPU___local_scalar_dense));
31666m.impl("set_.source_Storage",
31667TORCH_FN(wrapper_CPU_source_Storage_set_));
31668m.impl("set_.source_Storage_storage_offset",
31669TORCH_FN(wrapper_CPU_source_Storage_storage_offset_set_));
31670m.impl("set_.source_Tensor",
31671TORCH_FN(wrapper_CPU_source_Tensor_set_));
31672m.impl("set_",
31673TORCH_FN(wrapper_CPU__set_));
31674m.impl("is_set_to",
31675TORCH_FN(wrapper_CPU__is_set_to));
31676m.impl("masked_fill_.Scalar",
31677TORCH_FN(wrapper_CPU_Scalar_masked_fill_));
31678m.impl("masked_fill_.Tensor",
31679TORCH_FN(wrapper_CPU_Tensor_masked_fill_));
31680m.impl("masked_scatter_",
31681TORCH_FN(wrapper_CPU__masked_scatter_));
31682m.impl("_masked_softmax",
31683TORCH_FN(wrapper_CPU___masked_softmax));
31684m.impl("_masked_softmax_backward",
31685TORCH_FN(wrapper_CPU___masked_softmax_backward));
31686m.impl("view",
31687TORCH_FN(wrapper_CPU__view));
31688m.impl("put_",
31689TORCH_FN(wrapper_CPU__put_));
31690m.impl("index_add", TORCH_FN(wrapper_CPU_index_add));
31691m.impl("index_add.out", TORCH_FN(wrapper_CPU_index_add_out_out));
31692m.impl("index_add_", TORCH_FN(wrapper_CPU_index_add_));
31693m.impl("index_reduce", TORCH_FN(wrapper_CPU_index_reduce));
31694m.impl("index_reduce.out", TORCH_FN(wrapper_CPU_index_reduce_out_out));
31695m.impl("index_reduce_", TORCH_FN(wrapper_CPU_index_reduce_));
31696m.impl("index_fill_.int_Scalar",
31697TORCH_FN(wrapper_CPU_int_Scalar_index_fill_));
31698m.impl("index_fill_.int_Tensor",
31699TORCH_FN(wrapper_CPU_int_Tensor_index_fill_));
31700m.impl("scatter.src", TORCH_FN(wrapper_CPU_scatter_src));
31701m.impl("scatter.src_out", TORCH_FN(wrapper_CPU_scatter_out_src_out));
31702m.impl("scatter_.src", TORCH_FN(wrapper_CPU_scatter__src));
31703m.impl("scatter.value", TORCH_FN(wrapper_CPU_scatter_value));
31704m.impl("scatter.value_out", TORCH_FN(wrapper_CPU_scatter_out_value_out));
31705m.impl("scatter_.value", TORCH_FN(wrapper_CPU_scatter__value));
31706m.impl("scatter.reduce", TORCH_FN(wrapper_CPU_scatter_reduce));
31707m.impl("scatter.reduce_out", TORCH_FN(wrapper_CPU_scatter_out_reduce_out));
31708m.impl("scatter_.reduce", TORCH_FN(wrapper_CPU_scatter__reduce));
31709m.impl("scatter.value_reduce", TORCH_FN(wrapper_CPU_scatter_value_reduce));
31710m.impl("scatter.value_reduce_out", TORCH_FN(wrapper_CPU_scatter_out_value_reduce_out));
31711m.impl("scatter_.value_reduce", TORCH_FN(wrapper_CPU_scatter__value_reduce));
31712m.impl("scatter_add", TORCH_FN(wrapper_CPU_scatter_add));
31713m.impl("scatter_add.out", TORCH_FN(wrapper_CPU_scatter_add_out_out));
31714m.impl("scatter_add_", TORCH_FN(wrapper_CPU_scatter_add_));
31715m.impl("scatter_reduce.two", TORCH_FN(wrapper_CPU_scatter_reduce_two));
31716m.impl("scatter_reduce.two_out", TORCH_FN(wrapper_CPU_scatter_reduce_out_two_out));
31717m.impl("scatter_reduce_.two", TORCH_FN(wrapper_CPU_scatter_reduce__two));
31718m.impl("eq.Scalar", TORCH_FN(wrapper_CPU_eq_Scalar));
31719m.impl("eq.Scalar_out", TORCH_FN(wrapper_CPU_eq_out_Scalar_out));
31720m.impl("eq_.Scalar", TORCH_FN(wrapper_CPU_eq__Scalar));
31721m.impl("eq.Tensor", TORCH_FN(wrapper_CPU_eq_Tensor));
31722m.impl("eq.Tensor_out", TORCH_FN(wrapper_CPU_eq_out_Tensor_out));
31723m.impl("eq_.Tensor", TORCH_FN(wrapper_CPU_eq__Tensor));
31724m.impl("bitwise_and.Tensor", TORCH_FN(wrapper_CPU_bitwise_and_Tensor));
31725m.impl("bitwise_and.Tensor_out", TORCH_FN(wrapper_CPU_bitwise_and_out_Tensor_out));
31726m.impl("bitwise_and_.Tensor", TORCH_FN(wrapper_CPU_bitwise_and__Tensor));
31727m.impl("bitwise_or.Tensor", TORCH_FN(wrapper_CPU_bitwise_or_Tensor));
31728m.impl("bitwise_or.Tensor_out", TORCH_FN(wrapper_CPU_bitwise_or_out_Tensor_out));
31729m.impl("bitwise_or_.Tensor", TORCH_FN(wrapper_CPU_bitwise_or__Tensor));
31730m.impl("bitwise_xor.Tensor", TORCH_FN(wrapper_CPU_bitwise_xor_Tensor));
31731m.impl("bitwise_xor.Tensor_out", TORCH_FN(wrapper_CPU_bitwise_xor_out_Tensor_out));
31732m.impl("bitwise_xor_.Tensor", TORCH_FN(wrapper_CPU_bitwise_xor__Tensor));
31733m.impl("__lshift__.Scalar",
31734TORCH_FN(wrapper_CPU_Scalar___lshift__));
31735m.impl("__ilshift__.Scalar",
31736TORCH_FN(wrapper_CPU_Scalar___ilshift__));
31737m.impl("__lshift__.Tensor",
31738TORCH_FN(wrapper_CPU_Tensor___lshift__));
31739m.impl("__ilshift__.Tensor",
31740TORCH_FN(wrapper_CPU_Tensor___ilshift__));
31741m.impl("bitwise_left_shift.Tensor", TORCH_FN(wrapper_CPU_bitwise_left_shift_Tensor));
31742m.impl("bitwise_left_shift.Tensor_out", TORCH_FN(wrapper_CPU_bitwise_left_shift_out_Tensor_out));
31743m.impl("bitwise_left_shift_.Tensor", TORCH_FN(wrapper_CPU_bitwise_left_shift__Tensor));
31744m.impl("__rshift__.Scalar",
31745TORCH_FN(wrapper_CPU_Scalar___rshift__));
31746m.impl("__irshift__.Scalar",
31747TORCH_FN(wrapper_CPU_Scalar___irshift__));
31748m.impl("__rshift__.Tensor",
31749TORCH_FN(wrapper_CPU_Tensor___rshift__));
31750m.impl("__irshift__.Tensor",
31751TORCH_FN(wrapper_CPU_Tensor___irshift__));
31752m.impl("bitwise_right_shift.Tensor", TORCH_FN(wrapper_CPU_bitwise_right_shift_Tensor));
31753m.impl("bitwise_right_shift.Tensor_out", TORCH_FN(wrapper_CPU_bitwise_right_shift_out_Tensor_out));
31754m.impl("bitwise_right_shift_.Tensor", TORCH_FN(wrapper_CPU_bitwise_right_shift__Tensor));
31755m.impl("tril", TORCH_FN(wrapper_CPU_tril));
31756m.impl("tril.out", TORCH_FN(wrapper_CPU_tril_out_out));
31757m.impl("tril_", TORCH_FN(wrapper_CPU_tril_));
31758m.impl("triu", TORCH_FN(wrapper_CPU_triu));
31759m.impl("triu.out", TORCH_FN(wrapper_CPU_triu_out_out));
31760m.impl("triu_", TORCH_FN(wrapper_CPU_triu_));
31761m.impl("digamma", TORCH_FN(wrapper_CPU_digamma));
31762m.impl("digamma.out", TORCH_FN(wrapper_CPU_digamma_out_out));
31763m.impl("digamma_", TORCH_FN(wrapper_CPU_digamma_));
31764m.impl("lerp.Scalar", TORCH_FN(wrapper_CPU_lerp_Scalar));
31765m.impl("lerp.Scalar_out", TORCH_FN(wrapper_CPU_lerp_out_Scalar_out));
31766m.impl("lerp_.Scalar", TORCH_FN(wrapper_CPU_lerp__Scalar));
31767m.impl("lerp.Tensor", TORCH_FN(wrapper_CPU_lerp_Tensor));
31768m.impl("lerp.Tensor_out", TORCH_FN(wrapper_CPU_lerp_out_Tensor_out));
31769m.impl("lerp_.Tensor", TORCH_FN(wrapper_CPU_lerp__Tensor));
31770m.impl("addbmm",
31771TORCH_FN(wrapper_CPU__addbmm));
31772m.impl("addbmm.out",
31773TORCH_FN(wrapper_CPU_out_addbmm_out));
31774m.impl("addbmm_",
31775TORCH_FN(wrapper_CPU__addbmm_));
31776m.impl("random_.from",
31777TORCH_FN(wrapper_CPU_from_random_));
31778m.impl("random_.to",
31779TORCH_FN(wrapper_CPU_to_random_));
31780m.impl("random_",
31781TORCH_FN(wrapper_CPU__random_));
31782m.impl("uniform_",
31783TORCH_FN(wrapper_CPU__uniform_));
31784m.impl("cauchy_",
31785TORCH_FN(wrapper_CPU__cauchy_));
31786m.impl("log_normal_",
31787TORCH_FN(wrapper_CPU__log_normal_));
31788m.impl("exponential_",
31789TORCH_FN(wrapper_CPU__exponential_));
31790m.impl("geometric_",
31791TORCH_FN(wrapper_CPU__geometric_));
31792m.impl("tril_indices",
31793TORCH_FN(wrapper_CPU__tril_indices));
31794m.impl("triu_indices",
31795TORCH_FN(wrapper_CPU__triu_indices));
31796m.impl("trace",
31797TORCH_FN(wrapper_CPU__trace));
31798m.impl("ne.Scalar", TORCH_FN(wrapper_CPU_ne_Scalar));
31799m.impl("ne.Scalar_out", TORCH_FN(wrapper_CPU_ne_out_Scalar_out));
31800m.impl("ne_.Scalar", TORCH_FN(wrapper_CPU_ne__Scalar));
31801m.impl("ne.Tensor", TORCH_FN(wrapper_CPU_ne_Tensor));
31802m.impl("ne.Tensor_out", TORCH_FN(wrapper_CPU_ne_out_Tensor_out));
31803m.impl("ne_.Tensor", TORCH_FN(wrapper_CPU_ne__Tensor));
31804m.impl("ge.Scalar", TORCH_FN(wrapper_CPU_ge_Scalar));
31805m.impl("ge.Scalar_out", TORCH_FN(wrapper_CPU_ge_out_Scalar_out));
31806m.impl("ge_.Scalar", TORCH_FN(wrapper_CPU_ge__Scalar));
31807m.impl("ge.Tensor", TORCH_FN(wrapper_CPU_ge_Tensor));
31808m.impl("ge.Tensor_out", TORCH_FN(wrapper_CPU_ge_out_Tensor_out));
31809m.impl("ge_.Tensor", TORCH_FN(wrapper_CPU_ge__Tensor));
31810m.impl("le.Scalar", TORCH_FN(wrapper_CPU_le_Scalar));
31811m.impl("le.Scalar_out", TORCH_FN(wrapper_CPU_le_out_Scalar_out));
31812m.impl("le_.Scalar", TORCH_FN(wrapper_CPU_le__Scalar));
31813m.impl("le.Tensor", TORCH_FN(wrapper_CPU_le_Tensor));
31814m.impl("le.Tensor_out", TORCH_FN(wrapper_CPU_le_out_Tensor_out));
31815m.impl("le_.Tensor", TORCH_FN(wrapper_CPU_le__Tensor));
31816m.impl("gt.Scalar", TORCH_FN(wrapper_CPU_gt_Scalar));
31817m.impl("gt.Scalar_out", TORCH_FN(wrapper_CPU_gt_out_Scalar_out));
31818m.impl("gt_.Scalar", TORCH_FN(wrapper_CPU_gt__Scalar));
31819m.impl("gt.Tensor", TORCH_FN(wrapper_CPU_gt_Tensor));
31820m.impl("gt.Tensor_out", TORCH_FN(wrapper_CPU_gt_out_Tensor_out));
31821m.impl("gt_.Tensor", TORCH_FN(wrapper_CPU_gt__Tensor));
31822m.impl("lt.Scalar", TORCH_FN(wrapper_CPU_lt_Scalar));
31823m.impl("lt.Scalar_out", TORCH_FN(wrapper_CPU_lt_out_Scalar_out));
31824m.impl("lt_.Scalar", TORCH_FN(wrapper_CPU_lt__Scalar));
31825m.impl("lt.Tensor", TORCH_FN(wrapper_CPU_lt_Tensor));
31826m.impl("lt.Tensor_out", TORCH_FN(wrapper_CPU_lt_out_Tensor_out));
31827m.impl("lt_.Tensor", TORCH_FN(wrapper_CPU_lt__Tensor));
31828m.impl("take",
31829TORCH_FN(wrapper_CPU__take));
31830m.impl("take.out",
31831TORCH_FN(wrapper_CPU_out_take_out));
31832m.impl("index_select",
31833TORCH_FN(wrapper_CPU__index_select));
31834m.impl("index_select.out",
31835TORCH_FN(wrapper_CPU_out_index_select_out));
31836m.impl("masked_select",
31837TORCH_FN(wrapper_CPU__masked_select));
31838m.impl("masked_select.out",
31839TORCH_FN(wrapper_CPU_out_masked_select_out));
31840m.impl("nonzero",
31841TORCH_FN(wrapper_CPU__nonzero));
31842m.impl("nonzero.out",
31843TORCH_FN(wrapper_CPU_out_nonzero_out));
31844m.impl("gather", TORCH_FN(wrapper_CPU_gather));
31845m.impl("gather.out", TORCH_FN(wrapper_CPU_gather_out_out));
31846m.impl("addcmul", TORCH_FN(wrapper_CPU_addcmul));
31847m.impl("addcmul.out", TORCH_FN(wrapper_CPU_addcmul_out_out));
31848m.impl("addcmul_", TORCH_FN(wrapper_CPU_addcmul_));
31849m.impl("addcdiv", TORCH_FN(wrapper_CPU_addcdiv));
31850m.impl("addcdiv.out", TORCH_FN(wrapper_CPU_addcdiv_out_out));
31851m.impl("addcdiv_", TORCH_FN(wrapper_CPU_addcdiv_));
31852m.impl("triangular_solve", TORCH_FN(wrapper_CPU_triangular_solve));
31853m.impl("triangular_solve.X", TORCH_FN(wrapper_CPU_triangular_solve_out_X));
31854m.impl("linalg_solve_triangular",
31855TORCH_FN(wrapper_CPU__linalg_solve_triangular));
31856m.impl("linalg_solve_triangular.out",
31857TORCH_FN(wrapper_CPU_out_linalg_solve_triangular_out));
31858m.impl("cholesky",
31859TORCH_FN(wrapper_CPU__cholesky));
31860m.impl("cholesky.out",
31861TORCH_FN(wrapper_CPU_out_cholesky_out));
31862m.impl("_cholesky_solve_helper",
31863TORCH_FN(wrapper_CPU___cholesky_solve_helper));
31864m.impl("cholesky_inverse",
31865TORCH_FN(wrapper_CPU__cholesky_inverse));
31866m.impl("cholesky_inverse.out",
31867TORCH_FN(wrapper_CPU_out_cholesky_inverse_out));
31868m.impl("geqrf",
31869TORCH_FN(wrapper_CPU__geqrf));
31870m.impl("geqrf.a",
31871TORCH_FN(wrapper_CPU_a_geqrf_out));
31872m.impl("ormqr",
31873TORCH_FN(wrapper_CPU__ormqr));
31874m.impl("ormqr.out",
31875TORCH_FN(wrapper_CPU_out_ormqr_out));
31876m.impl("lu_unpack", TORCH_FN(wrapper_CPU_lu_unpack));
31877m.impl("lu_unpack.out", TORCH_FN(wrapper_CPU_lu_unpack_out_out));
31878m.impl("multinomial",
31879TORCH_FN(wrapper_CPU__multinomial));
31880m.impl("multinomial.out",
31881TORCH_FN(wrapper_CPU_out_multinomial_out));
31882m.impl("lgamma", TORCH_FN(wrapper_CPU_lgamma));
31883m.impl("lgamma.out", TORCH_FN(wrapper_CPU_lgamma_out_out));
31884m.impl("lgamma_", TORCH_FN(wrapper_CPU_lgamma_));
31885m.impl("polygamma", TORCH_FN(wrapper_CPU_polygamma));
31886m.impl("polygamma.out", TORCH_FN(wrapper_CPU_polygamma_out_out));
31887m.impl("erfinv", TORCH_FN(wrapper_CPU_erfinv));
31888m.impl("erfinv.out", TORCH_FN(wrapper_CPU_erfinv_out_out));
31889m.impl("erfinv_", TORCH_FN(wrapper_CPU_erfinv_));
31890m.impl("i0", TORCH_FN(wrapper_CPU_i0));
31891m.impl("i0.out", TORCH_FN(wrapper_CPU_i0_out_out));
31892m.impl("i0_", TORCH_FN(wrapper_CPU_i0_));
31893m.impl("sign", TORCH_FN(wrapper_CPU_sign));
31894m.impl("sign.out", TORCH_FN(wrapper_CPU_sign_out_out));
31895m.impl("sign_", TORCH_FN(wrapper_CPU_sign_));
31896m.impl("signbit", TORCH_FN(wrapper_CPU_signbit));
31897m.impl("signbit.out", TORCH_FN(wrapper_CPU_signbit_out_out));
31898m.impl("atan2", TORCH_FN(wrapper_CPU_atan2));
31899m.impl("atan2.out", TORCH_FN(wrapper_CPU_atan2_out_out));
31900m.impl("atan2_", TORCH_FN(wrapper_CPU_atan2_));
31901m.impl("histc",
31902TORCH_FN(wrapper_CPU__histc));
31903m.impl("histc.out",
31904TORCH_FN(wrapper_CPU_out_histc_out));
31905m.impl("histogram.bins_tensor",
31906TORCH_FN(wrapper_CPU_bins_tensor_histogram));
31907m.impl("histogram.bins_tensor_out",
31908TORCH_FN(wrapper_CPU_bins_tensor_out_histogram_out));
31909m.impl("histogram.bin_ct",
31910TORCH_FN(wrapper_CPU_bin_ct_histogram));
31911m.impl("histogram.bin_ct_out",
31912TORCH_FN(wrapper_CPU_bin_ct_out_histogram_out));
31913m.impl("_histogramdd_bin_edges",
31914TORCH_FN(wrapper_CPU___histogramdd_bin_edges));
31915m.impl("_histogramdd_from_bin_cts",
31916TORCH_FN(wrapper_CPU___histogramdd_from_bin_cts));
31917m.impl("_histogramdd_from_bin_tensors",
31918TORCH_FN(wrapper_CPU___histogramdd_from_bin_tensors));
31919m.impl("fmod.Tensor", TORCH_FN(wrapper_CPU_fmod_Tensor));
31920m.impl("fmod.Tensor_out", TORCH_FN(wrapper_CPU_fmod_out_Tensor_out));
31921m.impl("fmod_.Tensor", TORCH_FN(wrapper_CPU_fmod__Tensor));
31922m.impl("hypot", TORCH_FN(wrapper_CPU_hypot));
31923m.impl("hypot.out", TORCH_FN(wrapper_CPU_hypot_out_out));
31924m.impl("hypot_", TORCH_FN(wrapper_CPU_hypot_));
31925m.impl("igamma", TORCH_FN(wrapper_CPU_igamma));
31926m.impl("igamma.out", TORCH_FN(wrapper_CPU_igamma_out_out));
31927m.impl("igamma_", TORCH_FN(wrapper_CPU_igamma_));
31928m.impl("igammac", TORCH_FN(wrapper_CPU_igammac));
31929m.impl("igammac.out", TORCH_FN(wrapper_CPU_igammac_out_out));
31930m.impl("igammac_", TORCH_FN(wrapper_CPU_igammac_));
31931m.impl("nextafter", TORCH_FN(wrapper_CPU_nextafter));
31932m.impl("nextafter.out", TORCH_FN(wrapper_CPU_nextafter_out_out));
31933m.impl("nextafter_", TORCH_FN(wrapper_CPU_nextafter_));
31934m.impl("remainder.Tensor", TORCH_FN(wrapper_CPU_remainder_Tensor));
31935m.impl("remainder.Tensor_out", TORCH_FN(wrapper_CPU_remainder_out_Tensor_out));
31936m.impl("remainder_.Tensor", TORCH_FN(wrapper_CPU_remainder__Tensor));
31937m.impl("remainder.Scalar_Tensor",
31938TORCH_FN(wrapper_CPU_Scalar_Tensor_remainder));
31939m.impl("min",
31940TORCH_FN(wrapper_CPU__min));
31941m.impl("fmin", TORCH_FN(wrapper_CPU_fmin));
31942m.impl("fmin.out", TORCH_FN(wrapper_CPU_fmin_out_out));
31943m.impl("max",
31944TORCH_FN(wrapper_CPU__max));
31945m.impl("max.unary_out",
31946TORCH_FN(wrapper_CPU_unary_out_max_out));
31947m.impl("fmax", TORCH_FN(wrapper_CPU_fmax));
31948m.impl("fmax.out", TORCH_FN(wrapper_CPU_fmax_out_out));
31949m.impl("maximum", TORCH_FN(wrapper_CPU_maximum));
31950m.impl("maximum.out", TORCH_FN(wrapper_CPU_maximum_out_out));
31951m.impl("minimum", TORCH_FN(wrapper_CPU_minimum));
31952m.impl("minimum.out", TORCH_FN(wrapper_CPU_minimum_out_out));
31953m.impl("sort.stable", TORCH_FN(wrapper_CPU_sort_stable));
31954m.impl("sort.values_stable", TORCH_FN(wrapper_CPU_sort_out_values_stable));
31955m.impl("argsort.stable",
31956TORCH_FN(wrapper_CPU_stable_argsort));
31957m.impl("topk", TORCH_FN(wrapper_CPU_topk));
31958m.impl("topk.values", TORCH_FN(wrapper_CPU_topk_out_values));
31959m.impl("all", TORCH_FN(wrapper_CPU_all));
31960m.impl("all.all_out", TORCH_FN(wrapper_CPU_all_out_all_out));
31961m.impl("any", TORCH_FN(wrapper_CPU_any));
31962m.impl("any.all_out", TORCH_FN(wrapper_CPU_any_out_all_out));
31963m.impl("renorm", TORCH_FN(wrapper_CPU_renorm));
31964m.impl("renorm.out", TORCH_FN(wrapper_CPU_renorm_out_out));
31965m.impl("renorm_", TORCH_FN(wrapper_CPU_renorm_));
31966m.impl("unfold",
31967TORCH_FN(wrapper_CPU__unfold));
31968m.impl("unfold_backward",
31969TORCH_FN(wrapper_CPU__unfold_backward));
31970m.impl("equal",
31971TORCH_FN(wrapper_CPU__equal));
31972m.impl("pow.Tensor_Tensor", TORCH_FN(wrapper_CPU_pow_Tensor_Tensor));
31973m.impl("pow.Tensor_Tensor_out", TORCH_FN(wrapper_CPU_pow_out_Tensor_Tensor_out));
31974m.impl("pow_.Tensor", TORCH_FN(wrapper_CPU_pow__Tensor));
31975m.impl("pow.Scalar", TORCH_FN(wrapper_CPU_pow_Scalar));
31976m.impl("pow.Scalar_out", TORCH_FN(wrapper_CPU_pow_out_Scalar_out));
31977m.impl("pow.Tensor_Scalar", TORCH_FN(wrapper_CPU_pow_Tensor_Scalar));
31978m.impl("pow.Tensor_Scalar_out", TORCH_FN(wrapper_CPU_pow_out_Tensor_Scalar_out));
31979m.impl("pow_.Scalar", TORCH_FN(wrapper_CPU_pow__Scalar));
31980m.impl("normal_",
31981TORCH_FN(wrapper_CPU__normal_));
31982m.impl("normal.Tensor_float",
31983TORCH_FN(wrapper_CPU_Tensor_float_normal));
31984m.impl("normal.Tensor_float_out",
31985TORCH_FN(wrapper_CPU_Tensor_float_out_normal_out));
31986m.impl("normal.float_Tensor",
31987TORCH_FN(wrapper_CPU_float_Tensor_normal));
31988m.impl("normal.float_Tensor_out",
31989TORCH_FN(wrapper_CPU_float_Tensor_out_normal_out));
31990m.impl("normal.Tensor_Tensor",
31991TORCH_FN(wrapper_CPU_Tensor_Tensor_normal));
31992m.impl("normal.Tensor_Tensor_out",
31993TORCH_FN(wrapper_CPU_Tensor_Tensor_out_normal_out));
31994m.impl("_foreach_add.Scalar",
31995TORCH_FN(wrapper_CPU_Scalar__foreach_add));
31996m.impl("_foreach_add_.Scalar",
31997TORCH_FN(wrapper_CPU_Scalar__foreach_add_));
31998m.impl("_foreach_sub.Scalar",
31999TORCH_FN(wrapper_CPU_Scalar__foreach_sub));
32000m.impl("_foreach_sub_.Scalar",
32001TORCH_FN(wrapper_CPU_Scalar__foreach_sub_));
32002m.impl("_foreach_mul.Scalar",
32003TORCH_FN(wrapper_CPU_Scalar__foreach_mul));
32004m.impl("_foreach_mul_.Scalar",
32005TORCH_FN(wrapper_CPU_Scalar__foreach_mul_));
32006m.impl("_foreach_div.Scalar",
32007TORCH_FN(wrapper_CPU_Scalar__foreach_div));
32008m.impl("_foreach_div_.Scalar",
32009TORCH_FN(wrapper_CPU_Scalar__foreach_div_));
32010m.impl("_foreach_clamp_min.Scalar",
32011TORCH_FN(wrapper_CPU_Scalar__foreach_clamp_min));
32012m.impl("_foreach_clamp_min_.Scalar",
32013TORCH_FN(wrapper_CPU_Scalar__foreach_clamp_min_));
32014m.impl("_foreach_clamp_max.Scalar",
32015TORCH_FN(wrapper_CPU_Scalar__foreach_clamp_max));
32016m.impl("_foreach_clamp_max_.Scalar",
32017TORCH_FN(wrapper_CPU_Scalar__foreach_clamp_max_));
32018m.impl("_foreach_maximum.Scalar",
32019TORCH_FN(wrapper_CPU_Scalar__foreach_maximum));
32020m.impl("_foreach_maximum_.Scalar",
32021TORCH_FN(wrapper_CPU_Scalar__foreach_maximum_));
32022m.impl("_foreach_minimum.Scalar",
32023TORCH_FN(wrapper_CPU_Scalar__foreach_minimum));
32024m.impl("_foreach_minimum_.Scalar",
32025TORCH_FN(wrapper_CPU_Scalar__foreach_minimum_));
32026m.impl("_foreach_add.List",
32027TORCH_FN(wrapper_CPU_List__foreach_add));
32028m.impl("_foreach_add_.List",
32029TORCH_FN(wrapper_CPU_List__foreach_add_));
32030m.impl("_foreach_sub.List",
32031TORCH_FN(wrapper_CPU_List__foreach_sub));
32032m.impl("_foreach_sub_.List",
32033TORCH_FN(wrapper_CPU_List__foreach_sub_));
32034m.impl("_foreach_mul.List",
32035TORCH_FN(wrapper_CPU_List__foreach_mul));
32036m.impl("_foreach_mul_.List",
32037TORCH_FN(wrapper_CPU_List__foreach_mul_));
32038m.impl("_foreach_div.List",
32039TORCH_FN(wrapper_CPU_List__foreach_div));
32040m.impl("_foreach_div_.List",
32041TORCH_FN(wrapper_CPU_List__foreach_div_));
32042m.impl("_foreach_clamp_min.List",
32043TORCH_FN(wrapper_CPU_List__foreach_clamp_min));
32044m.impl("_foreach_clamp_min_.List",
32045TORCH_FN(wrapper_CPU_List__foreach_clamp_min_));
32046m.impl("_foreach_clamp_max.List",
32047TORCH_FN(wrapper_CPU_List__foreach_clamp_max));
32048m.impl("_foreach_clamp_max_.List",
32049TORCH_FN(wrapper_CPU_List__foreach_clamp_max_));
32050m.impl("_foreach_maximum.List",
32051TORCH_FN(wrapper_CPU_List__foreach_maximum));
32052m.impl("_foreach_maximum_.List",
32053TORCH_FN(wrapper_CPU_List__foreach_maximum_));
32054m.impl("_foreach_minimum.List",
32055TORCH_FN(wrapper_CPU_List__foreach_minimum));
32056m.impl("_foreach_minimum_.List",
32057TORCH_FN(wrapper_CPU_List__foreach_minimum_));
32058m.impl("_foreach_add.ScalarList",
32059TORCH_FN(wrapper_CPU_ScalarList__foreach_add));
32060m.impl("_foreach_add_.ScalarList",
32061TORCH_FN(wrapper_CPU_ScalarList__foreach_add_));
32062m.impl("_foreach_sub.ScalarList",
32063TORCH_FN(wrapper_CPU_ScalarList__foreach_sub));
32064m.impl("_foreach_sub_.ScalarList",
32065TORCH_FN(wrapper_CPU_ScalarList__foreach_sub_));
32066m.impl("_foreach_div.ScalarList",
32067TORCH_FN(wrapper_CPU_ScalarList__foreach_div));
32068m.impl("_foreach_div_.ScalarList",
32069TORCH_FN(wrapper_CPU_ScalarList__foreach_div_));
32070m.impl("_foreach_mul.ScalarList",
32071TORCH_FN(wrapper_CPU_ScalarList__foreach_mul));
32072m.impl("_foreach_mul_.ScalarList",
32073TORCH_FN(wrapper_CPU_ScalarList__foreach_mul_));
32074m.impl("_foreach_clamp_min.ScalarList",
32075TORCH_FN(wrapper_CPU_ScalarList__foreach_clamp_min));
32076m.impl("_foreach_clamp_min_.ScalarList",
32077TORCH_FN(wrapper_CPU_ScalarList__foreach_clamp_min_));
32078m.impl("_foreach_clamp_max.ScalarList",
32079TORCH_FN(wrapper_CPU_ScalarList__foreach_clamp_max));
32080m.impl("_foreach_clamp_max_.ScalarList",
32081TORCH_FN(wrapper_CPU_ScalarList__foreach_clamp_max_));
32082m.impl("_foreach_maximum.ScalarList",
32083TORCH_FN(wrapper_CPU_ScalarList__foreach_maximum));
32084m.impl("_foreach_maximum_.ScalarList",
32085TORCH_FN(wrapper_CPU_ScalarList__foreach_maximum_));
32086m.impl("_foreach_minimum.ScalarList",
32087TORCH_FN(wrapper_CPU_ScalarList__foreach_minimum));
32088m.impl("_foreach_minimum_.ScalarList",
32089TORCH_FN(wrapper_CPU_ScalarList__foreach_minimum_));
32090m.impl("_foreach_exp",
32091TORCH_FN(wrapper_CPU___foreach_exp));
32092m.impl("_foreach_exp_",
32093TORCH_FN(wrapper_CPU___foreach_exp_));
32094m.impl("_foreach_zero_",
32095TORCH_FN(wrapper_CPU___foreach_zero_));
32096m.impl("_foreach_sqrt",
32097TORCH_FN(wrapper_CPU___foreach_sqrt));
32098m.impl("_foreach_sqrt_",
32099TORCH_FN(wrapper_CPU___foreach_sqrt_));
32100m.impl("_foreach_abs",
32101TORCH_FN(wrapper_CPU___foreach_abs));
32102m.impl("_foreach_abs_",
32103TORCH_FN(wrapper_CPU___foreach_abs_));
32104m.impl("_foreach_acos",
32105TORCH_FN(wrapper_CPU___foreach_acos));
32106m.impl("_foreach_acos_",
32107TORCH_FN(wrapper_CPU___foreach_acos_));
32108m.impl("_foreach_asin",
32109TORCH_FN(wrapper_CPU___foreach_asin));
32110m.impl("_foreach_asin_",
32111TORCH_FN(wrapper_CPU___foreach_asin_));
32112m.impl("_foreach_atan",
32113TORCH_FN(wrapper_CPU___foreach_atan));
32114m.impl("_foreach_atan_",
32115TORCH_FN(wrapper_CPU___foreach_atan_));
32116m.impl("_foreach_ceil",
32117TORCH_FN(wrapper_CPU___foreach_ceil));
32118m.impl("_foreach_ceil_",
32119TORCH_FN(wrapper_CPU___foreach_ceil_));
32120m.impl("_foreach_cos",
32121TORCH_FN(wrapper_CPU___foreach_cos));
32122m.impl("_foreach_cos_",
32123TORCH_FN(wrapper_CPU___foreach_cos_));
m.impl("_foreach_cosh",
TORCH_FN(wrapper_CPU___foreach_cosh));
m.impl("_foreach_cosh_",
TORCH_FN(wrapper_CPU___foreach_cosh_));
m.impl("_foreach_erf",
TORCH_FN(wrapper_CPU___foreach_erf));
m.impl("_foreach_erf_",
TORCH_FN(wrapper_CPU___foreach_erf_));
m.impl("_foreach_erfc",
TORCH_FN(wrapper_CPU___foreach_erfc));
m.impl("_foreach_erfc_",
TORCH_FN(wrapper_CPU___foreach_erfc_));
m.impl("_foreach_expm1",
TORCH_FN(wrapper_CPU___foreach_expm1));
m.impl("_foreach_expm1_",
TORCH_FN(wrapper_CPU___foreach_expm1_));
m.impl("_foreach_floor",
TORCH_FN(wrapper_CPU___foreach_floor));
m.impl("_foreach_floor_",
TORCH_FN(wrapper_CPU___foreach_floor_));
m.impl("_foreach_log",
TORCH_FN(wrapper_CPU___foreach_log));
m.impl("_foreach_log_",
TORCH_FN(wrapper_CPU___foreach_log_));
m.impl("_foreach_log10",
TORCH_FN(wrapper_CPU___foreach_log10));
m.impl("_foreach_log10_",
TORCH_FN(wrapper_CPU___foreach_log10_));
m.impl("_foreach_log1p",
TORCH_FN(wrapper_CPU___foreach_log1p));
m.impl("_foreach_log1p_",
TORCH_FN(wrapper_CPU___foreach_log1p_));
m.impl("_foreach_log2",
TORCH_FN(wrapper_CPU___foreach_log2));
m.impl("_foreach_log2_",
TORCH_FN(wrapper_CPU___foreach_log2_));
m.impl("_foreach_neg",
TORCH_FN(wrapper_CPU___foreach_neg));
m.impl("_foreach_neg_",
TORCH_FN(wrapper_CPU___foreach_neg_));
m.impl("_foreach_tan",
TORCH_FN(wrapper_CPU___foreach_tan));
m.impl("_foreach_tan_",
TORCH_FN(wrapper_CPU___foreach_tan_));
m.impl("_foreach_tanh",
TORCH_FN(wrapper_CPU___foreach_tanh));
m.impl("_foreach_tanh_",
TORCH_FN(wrapper_CPU___foreach_tanh_));
m.impl("_foreach_sin",
TORCH_FN(wrapper_CPU___foreach_sin));
m.impl("_foreach_sin_",
TORCH_FN(wrapper_CPU___foreach_sin_));
m.impl("_foreach_sinh",
TORCH_FN(wrapper_CPU___foreach_sinh));
m.impl("_foreach_sinh_",
TORCH_FN(wrapper_CPU___foreach_sinh_));
m.impl("_foreach_round",
TORCH_FN(wrapper_CPU___foreach_round));
m.impl("_foreach_round_",
TORCH_FN(wrapper_CPU___foreach_round_));
m.impl("_foreach_lgamma",
TORCH_FN(wrapper_CPU___foreach_lgamma));
m.impl("_foreach_lgamma_",
TORCH_FN(wrapper_CPU___foreach_lgamma_));
m.impl("_foreach_frac",
TORCH_FN(wrapper_CPU___foreach_frac));
m.impl("_foreach_frac_",
TORCH_FN(wrapper_CPU___foreach_frac_));
m.impl("_foreach_reciprocal",
TORCH_FN(wrapper_CPU___foreach_reciprocal));
m.impl("_foreach_reciprocal_",
TORCH_FN(wrapper_CPU___foreach_reciprocal_));
m.impl("_foreach_sigmoid",
TORCH_FN(wrapper_CPU___foreach_sigmoid));
m.impl("_foreach_sigmoid_",
TORCH_FN(wrapper_CPU___foreach_sigmoid_));
m.impl("_foreach_trunc",
TORCH_FN(wrapper_CPU___foreach_trunc));
m.impl("_foreach_trunc_",
TORCH_FN(wrapper_CPU___foreach_trunc_));
m.impl("_foreach_addcdiv.Scalar",
TORCH_FN(wrapper_CPU_Scalar__foreach_addcdiv));
m.impl("_foreach_addcdiv_.Scalar",
TORCH_FN(wrapper_CPU_Scalar__foreach_addcdiv_));
m.impl("_foreach_addcmul.Scalar",
TORCH_FN(wrapper_CPU_Scalar__foreach_addcmul));
m.impl("_foreach_addcmul_.Scalar",
TORCH_FN(wrapper_CPU_Scalar__foreach_addcmul_));
m.impl("_foreach_addcdiv.ScalarList",
TORCH_FN(wrapper_CPU_ScalarList__foreach_addcdiv));
m.impl("_foreach_addcdiv_.ScalarList",
TORCH_FN(wrapper_CPU_ScalarList__foreach_addcdiv_));
m.impl("_foreach_addcdiv.Tensor",
TORCH_FN(wrapper_CPU_Tensor__foreach_addcdiv));
m.impl("_foreach_addcdiv_.Tensor",
TORCH_FN(wrapper_CPU_Tensor__foreach_addcdiv_));
m.impl("_foreach_addcmul.ScalarList",
TORCH_FN(wrapper_CPU_ScalarList__foreach_addcmul));
m.impl("_foreach_addcmul_.ScalarList",
TORCH_FN(wrapper_CPU_ScalarList__foreach_addcmul_));
m.impl("_foreach_addcmul.Tensor",
TORCH_FN(wrapper_CPU_Tensor__foreach_addcmul));
m.impl("_foreach_addcmul_.Tensor",
TORCH_FN(wrapper_CPU_Tensor__foreach_addcmul_));
m.impl("_foreach_norm.Scalar",
TORCH_FN(wrapper_CPU_Scalar__foreach_norm));
m.impl("_foreach_lerp.List",
TORCH_FN(wrapper_CPU_List__foreach_lerp));
m.impl("_foreach_lerp_.List",
TORCH_FN(wrapper_CPU_List__foreach_lerp_));
m.impl("_foreach_lerp.Scalar",
TORCH_FN(wrapper_CPU_Scalar__foreach_lerp));
m.impl("_foreach_lerp_.Scalar",
TORCH_FN(wrapper_CPU_Scalar__foreach_lerp_));
m.impl("bucketize.Tensor",
TORCH_FN(wrapper_CPU_Tensor_bucketize));
m.impl("bucketize.Tensor_out",
TORCH_FN(wrapper_CPU_Tensor_out_bucketize_out));
m.impl("bucketize.Scalar",
TORCH_FN(wrapper_CPU_Scalar_bucketize));
m.impl("searchsorted.Tensor",
TORCH_FN(wrapper_CPU_Tensor_searchsorted));
m.impl("searchsorted.Tensor_out",
TORCH_FN(wrapper_CPU_Tensor_out_searchsorted_out));
m.impl("searchsorted.Scalar",
TORCH_FN(wrapper_CPU_Scalar_searchsorted));
m.impl("_convert_indices_from_coo_to_csr", TORCH_FN(wrapper_CPU__convert_indices_from_coo_to_csr));
m.impl("_convert_indices_from_coo_to_csr.out", TORCH_FN(wrapper_CPU__convert_indices_from_coo_to_csr_out_out));
m.impl("_convert_indices_from_csr_to_coo", TORCH_FN(wrapper_CPU__convert_indices_from_csr_to_coo));
m.impl("_convert_indices_from_csr_to_coo.out", TORCH_FN(wrapper_CPU__convert_indices_from_csr_to_coo_out_out));
m.impl("mse_loss", TORCH_FN(wrapper_CPU_mse_loss));
m.impl("mse_loss.out", TORCH_FN(wrapper_CPU_mse_loss_out_out));
m.impl("mse_loss_backward",
TORCH_FN(wrapper_CPU__mse_loss_backward));
m.impl("mse_loss_backward.grad_input",
TORCH_FN(wrapper_CPU_grad_input_mse_loss_backward_out));
m.impl("multi_margin_loss",
TORCH_FN(wrapper_CPU__multi_margin_loss));
m.impl("multi_margin_loss.out",
TORCH_FN(wrapper_CPU_out_multi_margin_loss_out));
m.impl("multi_margin_loss_backward",
TORCH_FN(wrapper_CPU__multi_margin_loss_backward));
m.impl("multi_margin_loss_backward.grad_input",
TORCH_FN(wrapper_CPU_grad_input_multi_margin_loss_backward_out));
m.impl("multilabel_margin_loss_forward",
TORCH_FN(wrapper_CPU__multilabel_margin_loss_forward));
m.impl("multilabel_margin_loss_forward.output",
TORCH_FN(wrapper_CPU_output_multilabel_margin_loss_forward_out));
m.impl("multilabel_margin_loss_backward",
TORCH_FN(wrapper_CPU__multilabel_margin_loss_backward));
m.impl("multilabel_margin_loss_backward.grad_input",
TORCH_FN(wrapper_CPU_grad_input_multilabel_margin_loss_backward_out));
m.impl("nll_loss_forward", TORCH_FN(wrapper_CPU_nll_loss_forward));
m.impl("nll_loss_forward.output", TORCH_FN(wrapper_CPU_nll_loss_forward_out_output));
m.impl("nll_loss_backward", TORCH_FN(wrapper_CPU_nll_loss_backward));
m.impl("nll_loss_backward.grad_input", TORCH_FN(wrapper_CPU_nll_loss_backward_out_grad_input));
m.impl("nll_loss2d_forward",
TORCH_FN(wrapper_CPU__nll_loss2d_forward));
m.impl("nll_loss2d_forward.output",
TORCH_FN(wrapper_CPU_output_nll_loss2d_forward_out));
m.impl("nll_loss2d_backward",
TORCH_FN(wrapper_CPU__nll_loss2d_backward));
m.impl("nll_loss2d_backward.grad_input",
TORCH_FN(wrapper_CPU_grad_input_nll_loss2d_backward_out));
m.impl("smooth_l1_loss", TORCH_FN(wrapper_CPU_smooth_l1_loss));
m.impl("smooth_l1_loss.out", TORCH_FN(wrapper_CPU_smooth_l1_loss_out_out));
m.impl("smooth_l1_loss_backward.grad_input",
TORCH_FN(wrapper_CPU_grad_input_smooth_l1_loss_backward_out));
m.impl("huber_loss",
TORCH_FN(wrapper_CPU__huber_loss));
m.impl("huber_loss.out",
TORCH_FN(wrapper_CPU_out_huber_loss_out));
m.impl("huber_loss_backward.out",
TORCH_FN(wrapper_CPU_out_huber_loss_backward_out));
m.impl("elu", TORCH_FN(wrapper_CPU_elu));
m.impl("elu.out", TORCH_FN(wrapper_CPU_elu_out_out));
m.impl("elu_", TORCH_FN(wrapper_CPU_elu_));
m.impl("elu_backward", TORCH_FN(wrapper_CPU_elu_backward));
m.impl("elu_backward.grad_input", TORCH_FN(wrapper_CPU_elu_backward_out_grad_input));
m.impl("glu", TORCH_FN(wrapper_CPU_glu));
m.impl("glu.out", TORCH_FN(wrapper_CPU_glu_out_out));
m.impl("glu_backward",
TORCH_FN(wrapper_CPU__glu_backward));
m.impl("glu_backward.grad_input",
TORCH_FN(wrapper_CPU_grad_input_glu_backward_out));
m.impl("glu_jvp",
TORCH_FN(wrapper_CPU__glu_jvp));
m.impl("glu_backward_jvp",
TORCH_FN(wrapper_CPU__glu_backward_jvp));
m.impl("hardsigmoid", TORCH_FN(wrapper_CPU_hardsigmoid));
m.impl("hardsigmoid.out", TORCH_FN(wrapper_CPU_hardsigmoid_out_out));
m.impl("hardsigmoid_", TORCH_FN(wrapper_CPU_hardsigmoid_));
m.impl("hardsigmoid_backward", TORCH_FN(wrapper_CPU_hardsigmoid_backward));
m.impl("hardsigmoid_backward.grad_input", TORCH_FN(wrapper_CPU_hardsigmoid_backward_out_grad_input));
m.impl("hardtanh",
TORCH_FN(wrapper_CPU__hardtanh));
m.impl("hardtanh.out",
TORCH_FN(wrapper_CPU_out_hardtanh_out));
m.impl("hardtanh_",
TORCH_FN(wrapper_CPU__hardtanh_));
m.impl("hardtanh_backward",
TORCH_FN(wrapper_CPU__hardtanh_backward));
m.impl("hardtanh_backward.grad_input",
TORCH_FN(wrapper_CPU_grad_input_hardtanh_backward_out));
m.impl("hardswish",
TORCH_FN(wrapper_CPU__hardswish));
m.impl("hardswish.out",
TORCH_FN(wrapper_CPU_out_hardswish_out));
m.impl("hardswish_",
TORCH_FN(wrapper_CPU__hardswish_));
m.impl("hardswish_backward",
TORCH_FN(wrapper_CPU__hardswish_backward));
m.impl("leaky_relu", TORCH_FN(wrapper_CPU_leaky_relu));
m.impl("leaky_relu.out", TORCH_FN(wrapper_CPU_leaky_relu_out_out));
m.impl("leaky_relu_", TORCH_FN(wrapper_CPU_leaky_relu_));
m.impl("leaky_relu_backward", TORCH_FN(wrapper_CPU_leaky_relu_backward));
m.impl("leaky_relu_backward.grad_input", TORCH_FN(wrapper_CPU_leaky_relu_backward_out_grad_input));
m.impl("log_sigmoid_forward",
TORCH_FN(wrapper_CPU__log_sigmoid_forward));
m.impl("log_sigmoid_forward.output",
TORCH_FN(wrapper_CPU_output_log_sigmoid_forward_out));
m.impl("log_sigmoid_backward",
TORCH_FN(wrapper_CPU__log_sigmoid_backward));
m.impl("log_sigmoid_backward.grad_input",
TORCH_FN(wrapper_CPU_grad_input_log_sigmoid_backward_out));
m.impl("rrelu_with_noise",
TORCH_FN(wrapper_CPU__rrelu_with_noise));
m.impl("rrelu_with_noise.out",
TORCH_FN(wrapper_CPU_out_rrelu_with_noise_out));
m.impl("rrelu_with_noise_",
TORCH_FN(wrapper_CPU__rrelu_with_noise_));
m.impl("softplus", TORCH_FN(wrapper_CPU_softplus));
m.impl("softplus.out", TORCH_FN(wrapper_CPU_softplus_out_out));
m.impl("softplus_backward", TORCH_FN(wrapper_CPU_softplus_backward));
m.impl("softplus_backward.grad_input", TORCH_FN(wrapper_CPU_softplus_backward_out_grad_input));
m.impl("softshrink", TORCH_FN(wrapper_CPU_softshrink));
m.impl("softshrink.out", TORCH_FN(wrapper_CPU_softshrink_out_out));
m.impl("softshrink_backward", TORCH_FN(wrapper_CPU_softshrink_backward));
m.impl("softshrink_backward.grad_input", TORCH_FN(wrapper_CPU_softshrink_backward_out_grad_input));
m.impl("adaptive_avg_pool2d.out",
TORCH_FN(wrapper_CPU_out_adaptive_avg_pool2d_out));
m.impl("_adaptive_avg_pool2d",
TORCH_FN(wrapper_CPU___adaptive_avg_pool2d));
m.impl("_adaptive_avg_pool2d_backward",
TORCH_FN(wrapper_CPU___adaptive_avg_pool2d_backward));
m.impl("adaptive_avg_pool3d.out",
TORCH_FN(wrapper_CPU_out_adaptive_avg_pool3d_out));
m.impl("_adaptive_avg_pool3d",
TORCH_FN(wrapper_CPU___adaptive_avg_pool3d));
m.impl("adaptive_avg_pool3d_backward.grad_input",
TORCH_FN(wrapper_CPU_grad_input_adaptive_avg_pool3d_backward_out));
m.impl("_adaptive_avg_pool3d_backward",
TORCH_FN(wrapper_CPU___adaptive_avg_pool3d_backward));
m.impl("adaptive_max_pool2d", TORCH_FN(wrapper_CPU_adaptive_max_pool2d));
m.impl("adaptive_max_pool2d.out", TORCH_FN(wrapper_CPU_adaptive_max_pool2d_out_out));
m.impl("adaptive_max_pool2d_backward", TORCH_FN(wrapper_CPU_adaptive_max_pool2d_backward));
m.impl("adaptive_max_pool2d_backward.grad_input", TORCH_FN(wrapper_CPU_adaptive_max_pool2d_backward_out_grad_input));
m.impl("adaptive_max_pool3d", TORCH_FN(wrapper_CPU_adaptive_max_pool3d));
m.impl("adaptive_max_pool3d.out", TORCH_FN(wrapper_CPU_adaptive_max_pool3d_out_out));
m.impl("adaptive_max_pool3d_backward", TORCH_FN(wrapper_CPU_adaptive_max_pool3d_backward));
m.impl("adaptive_max_pool3d_backward.grad_input", TORCH_FN(wrapper_CPU_adaptive_max_pool3d_backward_out_grad_input));
m.impl("avg_pool2d", TORCH_FN(wrapper_CPU_avg_pool2d));
m.impl("avg_pool2d.out", TORCH_FN(wrapper_CPU_avg_pool2d_out_out));
m.impl("avg_pool2d_backward", TORCH_FN(wrapper_CPU_avg_pool2d_backward));
m.impl("avg_pool2d_backward.grad_input", TORCH_FN(wrapper_CPU_avg_pool2d_backward_out_grad_input));
m.impl("avg_pool3d", TORCH_FN(wrapper_CPU_avg_pool3d));
m.impl("avg_pool3d.out", TORCH_FN(wrapper_CPU_avg_pool3d_out_out));
m.impl("avg_pool3d_backward", TORCH_FN(wrapper_CPU_avg_pool3d_backward));
m.impl("avg_pool3d_backward.grad_input", TORCH_FN(wrapper_CPU_avg_pool3d_backward_out_grad_input));
m.impl("fractional_max_pool2d", TORCH_FN(wrapper_CPU_fractional_max_pool2d));
m.impl("fractional_max_pool2d.output", TORCH_FN(wrapper_CPU_fractional_max_pool2d_out_output));
m.impl("fractional_max_pool2d_backward", TORCH_FN(wrapper_CPU_fractional_max_pool2d_backward));
m.impl("fractional_max_pool2d_backward.grad_input", TORCH_FN(wrapper_CPU_fractional_max_pool2d_backward_out_grad_input));
m.impl("fractional_max_pool3d", TORCH_FN(wrapper_CPU_fractional_max_pool3d));
m.impl("fractional_max_pool3d.output", TORCH_FN(wrapper_CPU_fractional_max_pool3d_out_output));
m.impl("fractional_max_pool3d_backward",
TORCH_FN(wrapper_CPU__fractional_max_pool3d_backward));
m.impl("fractional_max_pool3d_backward.grad_input",
TORCH_FN(wrapper_CPU_grad_input_fractional_max_pool3d_backward_out));
m.impl("max_pool2d_with_indices", TORCH_FN(wrapper_CPU_max_pool2d_with_indices));
m.impl("max_pool2d_with_indices.out", TORCH_FN(wrapper_CPU_max_pool2d_with_indices_out_out));
m.impl("max_pool2d_with_indices_backward", TORCH_FN(wrapper_CPU_max_pool2d_with_indices_backward));
m.impl("max_pool2d_with_indices_backward.grad_input", TORCH_FN(wrapper_CPU_max_pool2d_with_indices_backward_out_grad_input));
m.impl("max_pool3d_with_indices",
TORCH_FN(wrapper_CPU__max_pool3d_with_indices));
m.impl("max_pool3d_with_indices.out",
TORCH_FN(wrapper_CPU_out_max_pool3d_with_indices_out));
m.impl("max_pool3d_with_indices_backward",
TORCH_FN(wrapper_CPU__max_pool3d_with_indices_backward));
m.impl("max_pool3d_with_indices_backward.grad_input",
TORCH_FN(wrapper_CPU_grad_input_max_pool3d_with_indices_backward_out));
m.impl("max_unpool2d",
TORCH_FN(wrapper_CPU__max_unpool2d));
m.impl("max_unpool2d.out",
TORCH_FN(wrapper_CPU_out_max_unpool2d_out));
m.impl("max_unpool3d",
TORCH_FN(wrapper_CPU__max_unpool3d));
m.impl("max_unpool3d.out",
TORCH_FN(wrapper_CPU_out_max_unpool3d_out));
m.impl("reflection_pad1d", TORCH_FN(wrapper_CPU_reflection_pad1d));
m.impl("reflection_pad1d.out", TORCH_FN(wrapper_CPU_reflection_pad1d_out_out));
m.impl("reflection_pad1d_backward", TORCH_FN(wrapper_CPU_reflection_pad1d_backward));
m.impl("reflection_pad1d_backward.grad_input", TORCH_FN(wrapper_CPU_reflection_pad1d_backward_out_grad_input));
m.impl("reflection_pad2d",
TORCH_FN(wrapper_CPU__reflection_pad2d));
m.impl("reflection_pad2d.out",
TORCH_FN(wrapper_CPU_out_reflection_pad2d_out));
m.impl("reflection_pad2d_backward",
TORCH_FN(wrapper_CPU__reflection_pad2d_backward));
m.impl("reflection_pad2d_backward.grad_input",
TORCH_FN(wrapper_CPU_grad_input_reflection_pad2d_backward_out));
m.impl("reflection_pad3d", TORCH_FN(wrapper_CPU_reflection_pad3d));
m.impl("reflection_pad3d.out", TORCH_FN(wrapper_CPU_reflection_pad3d_out_out));
m.impl("reflection_pad3d_backward", TORCH_FN(wrapper_CPU_reflection_pad3d_backward));
m.impl("reflection_pad3d_backward.grad_input", TORCH_FN(wrapper_CPU_reflection_pad3d_backward_out_grad_input));
m.impl("replication_pad1d", TORCH_FN(wrapper_CPU_replication_pad1d));
m.impl("replication_pad1d.out", TORCH_FN(wrapper_CPU_replication_pad1d_out_out));
m.impl("replication_pad1d_backward", TORCH_FN(wrapper_CPU_replication_pad1d_backward));
m.impl("replication_pad1d_backward.grad_input", TORCH_FN(wrapper_CPU_replication_pad1d_backward_out_grad_input));
m.impl("replication_pad2d", TORCH_FN(wrapper_CPU_replication_pad2d));
m.impl("replication_pad2d.out", TORCH_FN(wrapper_CPU_replication_pad2d_out_out));
m.impl("replication_pad2d_backward",
TORCH_FN(wrapper_CPU__replication_pad2d_backward));
m.impl("replication_pad2d_backward.grad_input",
TORCH_FN(wrapper_CPU_grad_input_replication_pad2d_backward_out));
m.impl("replication_pad3d", TORCH_FN(wrapper_CPU_replication_pad3d));
m.impl("replication_pad3d.out", TORCH_FN(wrapper_CPU_replication_pad3d_out_out));
m.impl("replication_pad3d_backward",
TORCH_FN(wrapper_CPU__replication_pad3d_backward));
m.impl("replication_pad3d_backward.grad_input",
TORCH_FN(wrapper_CPU_grad_input_replication_pad3d_backward_out));
m.impl("upsample_linear1d", TORCH_FN(wrapper_CPU_upsample_linear1d));
m.impl("upsample_linear1d.out", TORCH_FN(wrapper_CPU_upsample_linear1d_out_out));
m.impl("upsample_linear1d_backward", TORCH_FN(wrapper_CPU_upsample_linear1d_backward));
m.impl("upsample_linear1d_backward.grad_input", TORCH_FN(wrapper_CPU_upsample_linear1d_backward_out_grad_input));
m.impl("upsample_bilinear2d", TORCH_FN(wrapper_CPU_upsample_bilinear2d));
m.impl("upsample_bilinear2d.out", TORCH_FN(wrapper_CPU_upsample_bilinear2d_out_out));
m.impl("upsample_bilinear2d_backward", TORCH_FN(wrapper_CPU_upsample_bilinear2d_backward));
m.impl("upsample_bilinear2d_backward.grad_input", TORCH_FN(wrapper_CPU_upsample_bilinear2d_backward_out_grad_input));
m.impl("_upsample_bilinear2d_aa", TORCH_FN(wrapper_CPU__upsample_bilinear2d_aa));
m.impl("_upsample_bilinear2d_aa.out", TORCH_FN(wrapper_CPU__upsample_bilinear2d_aa_out_out));
m.impl("_upsample_bilinear2d_aa_backward", TORCH_FN(wrapper_CPU__upsample_bilinear2d_aa_backward));
m.impl("_upsample_bilinear2d_aa_backward.grad_input", TORCH_FN(wrapper_CPU__upsample_bilinear2d_aa_backward_out_grad_input));
m.impl("upsample_bicubic2d", TORCH_FN(wrapper_CPU_upsample_bicubic2d));
m.impl("upsample_bicubic2d.out", TORCH_FN(wrapper_CPU_upsample_bicubic2d_out_out));
m.impl("upsample_bicubic2d_backward", TORCH_FN(wrapper_CPU_upsample_bicubic2d_backward));
m.impl("upsample_bicubic2d_backward.grad_input", TORCH_FN(wrapper_CPU_upsample_bicubic2d_backward_out_grad_input));
m.impl("_upsample_bicubic2d_aa", TORCH_FN(wrapper_CPU__upsample_bicubic2d_aa));
m.impl("_upsample_bicubic2d_aa.out", TORCH_FN(wrapper_CPU__upsample_bicubic2d_aa_out_out));
m.impl("_upsample_bicubic2d_aa_backward", TORCH_FN(wrapper_CPU__upsample_bicubic2d_aa_backward));
m.impl("_upsample_bicubic2d_aa_backward.grad_input", TORCH_FN(wrapper_CPU__upsample_bicubic2d_aa_backward_out_grad_input));
m.impl("upsample_trilinear3d", TORCH_FN(wrapper_CPU_upsample_trilinear3d));
m.impl("upsample_trilinear3d.out", TORCH_FN(wrapper_CPU_upsample_trilinear3d_out_out));
m.impl("upsample_trilinear3d_backward", TORCH_FN(wrapper_CPU_upsample_trilinear3d_backward));
m.impl("upsample_trilinear3d_backward.grad_input", TORCH_FN(wrapper_CPU_upsample_trilinear3d_backward_out_grad_input));
m.impl("upsample_nearest1d", TORCH_FN(wrapper_CPU_upsample_nearest1d));
m.impl("upsample_nearest1d.out", TORCH_FN(wrapper_CPU_upsample_nearest1d_out_out));
m.impl("_upsample_nearest_exact1d", TORCH_FN(wrapper_CPU__upsample_nearest_exact1d));
m.impl("_upsample_nearest_exact1d.out", TORCH_FN(wrapper_CPU__upsample_nearest_exact1d_out_out));
m.impl("upsample_nearest1d_backward", TORCH_FN(wrapper_CPU_upsample_nearest1d_backward));
m.impl("upsample_nearest1d_backward.grad_input", TORCH_FN(wrapper_CPU_upsample_nearest1d_backward_out_grad_input));
m.impl("_upsample_nearest_exact1d_backward", TORCH_FN(wrapper_CPU__upsample_nearest_exact1d_backward));
m.impl("_upsample_nearest_exact1d_backward.grad_input", TORCH_FN(wrapper_CPU__upsample_nearest_exact1d_backward_out_grad_input));
m.impl("upsample_nearest2d", TORCH_FN(wrapper_CPU_upsample_nearest2d));
m.impl("upsample_nearest2d.out", TORCH_FN(wrapper_CPU_upsample_nearest2d_out_out));
m.impl("_upsample_nearest_exact2d", TORCH_FN(wrapper_CPU__upsample_nearest_exact2d));
m.impl("_upsample_nearest_exact2d.out", TORCH_FN(wrapper_CPU__upsample_nearest_exact2d_out_out));
m.impl("upsample_nearest2d_backward", TORCH_FN(wrapper_CPU_upsample_nearest2d_backward));
m.impl("upsample_nearest2d_backward.grad_input", TORCH_FN(wrapper_CPU_upsample_nearest2d_backward_out_grad_input));
m.impl("_upsample_nearest_exact2d_backward", TORCH_FN(wrapper_CPU__upsample_nearest_exact2d_backward));
m.impl("_upsample_nearest_exact2d_backward.grad_input", TORCH_FN(wrapper_CPU__upsample_nearest_exact2d_backward_out_grad_input));
m.impl("upsample_nearest3d", TORCH_FN(wrapper_CPU_upsample_nearest3d));
m.impl("upsample_nearest3d.out", TORCH_FN(wrapper_CPU_upsample_nearest3d_out_out));
m.impl("_upsample_nearest_exact3d", TORCH_FN(wrapper_CPU__upsample_nearest_exact3d));
m.impl("_upsample_nearest_exact3d.out", TORCH_FN(wrapper_CPU__upsample_nearest_exact3d_out_out));
m.impl("upsample_nearest3d_backward", TORCH_FN(wrapper_CPU_upsample_nearest3d_backward));
m.impl("upsample_nearest3d_backward.grad_input", TORCH_FN(wrapper_CPU_upsample_nearest3d_backward_out_grad_input));
m.impl("_upsample_nearest_exact3d_backward", TORCH_FN(wrapper_CPU__upsample_nearest_exact3d_backward));
m.impl("_upsample_nearest_exact3d_backward.grad_input", TORCH_FN(wrapper_CPU__upsample_nearest_exact3d_backward_out_grad_input));
m.impl("sigmoid_backward", TORCH_FN(wrapper_CPU_sigmoid_backward));
m.impl("sigmoid_backward.grad_input", TORCH_FN(wrapper_CPU_sigmoid_backward_out_grad_input));
m.impl("logit_backward", TORCH_FN(wrapper_CPU_logit_backward));
m.impl("logit_backward.grad_input", TORCH_FN(wrapper_CPU_logit_backward_out_grad_input));
m.impl("tanh_backward", TORCH_FN(wrapper_CPU_tanh_backward));
m.impl("tanh_backward.grad_input", TORCH_FN(wrapper_CPU_tanh_backward_out_grad_input));
m.impl("slow_conv_transpose2d", TORCH_FN(wrapper_CPU_slow_conv_transpose2d));
m.impl("slow_conv_transpose2d.out", TORCH_FN(wrapper_CPU_slow_conv_transpose2d_out_out));
m.impl("slow_conv_transpose3d",
TORCH_FN(wrapper_CPU__slow_conv_transpose3d));
m.impl("slow_conv_transpose3d.out",
TORCH_FN(wrapper_CPU_out_slow_conv_transpose3d_out));
m.impl("_slow_conv2d_forward",
TORCH_FN(wrapper_CPU___slow_conv2d_forward));
m.impl("_slow_conv2d_forward.output",
TORCH_FN(wrapper_CPU_output__slow_conv2d_forward_out));
m.impl("_slow_conv2d_backward.grad_input",
TORCH_FN(wrapper_CPU_grad_input__slow_conv2d_backward_out));
m.impl("_slow_conv2d_backward.output_mask",
TORCH_FN(wrapper_CPU_output_mask__slow_conv2d_backward));
m.impl("slow_conv3d_forward",
TORCH_FN(wrapper_CPU__slow_conv3d_forward));
m.impl("slow_conv3d_forward.output",
TORCH_FN(wrapper_CPU_output_slow_conv3d_forward_out));
m.impl("slow_conv_dilated2d",
TORCH_FN(wrapper_CPU__slow_conv_dilated2d));
m.impl("slow_conv_dilated3d",
TORCH_FN(wrapper_CPU__slow_conv_dilated3d));
m.impl("col2im",
TORCH_FN(wrapper_CPU__col2im));
m.impl("col2im.out",
TORCH_FN(wrapper_CPU_out_col2im_out));
m.impl("im2col",
TORCH_FN(wrapper_CPU__im2col));
m.impl("im2col.out",
TORCH_FN(wrapper_CPU_out_im2col_out));
m.impl("isposinf", TORCH_FN(wrapper_CPU_isposinf));
m.impl("isposinf.out", TORCH_FN(wrapper_CPU_isposinf_out_out));
m.impl("isneginf", TORCH_FN(wrapper_CPU_isneginf));
m.impl("isneginf.out", TORCH_FN(wrapper_CPU_isneginf_out_out));
m.impl("special_entr", TORCH_FN(wrapper_CPU_special_entr));
m.impl("special_entr.out", TORCH_FN(wrapper_CPU_special_entr_out_out));
m.impl("special_ndtri", TORCH_FN(wrapper_CPU_special_ndtri));
m.impl("special_ndtri.out", TORCH_FN(wrapper_CPU_special_ndtri_out_out));
m.impl("special_log_ndtr", TORCH_FN(wrapper_CPU_special_log_ndtr));
m.impl("special_log_ndtr.out", TORCH_FN(wrapper_CPU_special_log_ndtr_out_out));
m.impl("special_erfcx", TORCH_FN(wrapper_CPU_special_erfcx));
m.impl("special_erfcx.out", TORCH_FN(wrapper_CPU_special_erfcx_out_out));
m.impl("special_xlog1py", TORCH_FN(wrapper_CPU_special_xlog1py));
m.impl("special_xlog1py.out", TORCH_FN(wrapper_CPU_special_xlog1py_out_out));
m.impl("special_zeta", TORCH_FN(wrapper_CPU_special_zeta));
m.impl("special_zeta.out", TORCH_FN(wrapper_CPU_special_zeta_out_out));
m.impl("special_i0e", TORCH_FN(wrapper_CPU_special_i0e));
m.impl("special_i0e.out", TORCH_FN(wrapper_CPU_special_i0e_out_out));
m.impl("special_i1", TORCH_FN(wrapper_CPU_special_i1));
m.impl("special_i1.out", TORCH_FN(wrapper_CPU_special_i1_out_out));
m.impl("special_i1e", TORCH_FN(wrapper_CPU_special_i1e));
m.impl("special_i1e.out", TORCH_FN(wrapper_CPU_special_i1e_out_out));
m.impl("linalg_cholesky_ex", TORCH_FN(wrapper_CPU_linalg_cholesky_ex));
m.impl("linalg_cholesky_ex.L", TORCH_FN(wrapper_CPU_linalg_cholesky_ex_out_L));
m.impl("linalg_cross", TORCH_FN(wrapper_CPU_linalg_cross));
m.impl("linalg_cross.out", TORCH_FN(wrapper_CPU_linalg_cross_out_out));
m.impl("linalg_lu_factor_ex", TORCH_FN(wrapper_CPU_linalg_lu_factor_ex));
m.impl("linalg_lu_factor_ex.out", TORCH_FN(wrapper_CPU_linalg_lu_factor_ex_out_out));
m.impl("linalg_lu", TORCH_FN(wrapper_CPU_linalg_lu));
m.impl("linalg_lu.out", TORCH_FN(wrapper_CPU_linalg_lu_out_out));
m.impl("linalg_lu_solve", TORCH_FN(wrapper_CPU_linalg_lu_solve));
m.impl("linalg_lu_solve.out", TORCH_FN(wrapper_CPU_linalg_lu_solve_out_out));
m.impl("_linalg_det", TORCH_FN(wrapper_CPU__linalg_det));
m.impl("_linalg_det.result", TORCH_FN(wrapper_CPU__linalg_det_out_result));
m.impl("linalg_ldl_factor_ex", TORCH_FN(wrapper_CPU_linalg_ldl_factor_ex));
m.impl("linalg_ldl_factor_ex.out", TORCH_FN(wrapper_CPU_linalg_ldl_factor_ex_out_out));
m.impl("linalg_ldl_solve", TORCH_FN(wrapper_CPU_linalg_ldl_solve));
m.impl("linalg_ldl_solve.out", TORCH_FN(wrapper_CPU_linalg_ldl_solve_out_out));
m.impl("linalg_lstsq.out",
TORCH_FN(wrapper_CPU_out_linalg_lstsq_out));
m.impl("linalg_matrix_exp",
TORCH_FN(wrapper_CPU__linalg_matrix_exp));
m.impl("_linalg_slogdet", TORCH_FN(wrapper_CPU__linalg_slogdet));
m.impl("_linalg_slogdet.sign", TORCH_FN(wrapper_CPU__linalg_slogdet_out_sign));
m.impl("linalg_eig",
TORCH_FN(wrapper_CPU__linalg_eig));
m.impl("linalg_eig.out",
TORCH_FN(wrapper_CPU_out_linalg_eig_out));
m.impl("_linalg_eigh", TORCH_FN(wrapper_CPU__linalg_eigh));
m.impl("_linalg_eigh.eigenvalues", TORCH_FN(wrapper_CPU__linalg_eigh_out_eigenvalues));
m.impl("linalg_householder_product",
TORCH_FN(wrapper_CPU__linalg_householder_product));
m.impl("linalg_householder_product.out",
TORCH_FN(wrapper_CPU_out_linalg_householder_product_out));
m.impl("linalg_inv_ex", TORCH_FN(wrapper_CPU_linalg_inv_ex));
m.impl("linalg_inv_ex.inverse", TORCH_FN(wrapper_CPU_linalg_inv_ex_out_inverse));
m.impl("linalg_vector_norm", TORCH_FN(wrapper_CPU_linalg_vector_norm));
m.impl("linalg_vector_norm.out", TORCH_FN(wrapper_CPU_linalg_vector_norm_out_out));
m.impl("_linalg_svd", TORCH_FN(wrapper_CPU__linalg_svd));
m.impl("_linalg_svd.U", TORCH_FN(wrapper_CPU__linalg_svd_out_U));
m.impl("_linalg_solve_ex", TORCH_FN(wrapper_CPU__linalg_solve_ex));
m.impl("_linalg_solve_ex.result", TORCH_FN(wrapper_CPU__linalg_solve_ex_out_result));
m.impl("linalg_qr", TORCH_FN(wrapper_CPU_linalg_qr));
m.impl("linalg_qr.out", TORCH_FN(wrapper_CPU_linalg_qr_out_out));
m.impl("_test_optional_intlist",
TORCH_FN(wrapper_CPU___test_optional_intlist));
m.impl("_test_optional_filled_intlist",
TORCH_FN(wrapper_CPU___test_optional_filled_intlist));
m.impl("_test_optional_floatlist",
TORCH_FN(wrapper_CPU___test_optional_floatlist));
m.impl("segment_reduce",
TORCH_FN(wrapper_CPU__segment_reduce));
m.impl("_segment_reduce_backward",
TORCH_FN(wrapper_CPU___segment_reduce_backward));
m.impl("_transformer_encoder_layer_fwd",
TORCH_FN(wrapper_CPU___transformer_encoder_layer_fwd));
m.impl("_native_multi_head_attention",
TORCH_FN(wrapper_CPU___native_multi_head_attention));
m.impl("_fused_sdp_choice",
TORCH_FN(wrapper_CPU___fused_sdp_choice));
m.impl("special_airy_ai", TORCH_FN(wrapper_CPU_special_airy_ai));
m.impl("special_airy_ai.out", TORCH_FN(wrapper_CPU_special_airy_ai_out_out));
m.impl("_transformer_decoder_only_layer_fwd",
TORCH_FN(wrapper_CPU___transformer_decoder_only_layer_fwd));
m.impl("_native_decoder_only_multi_head_attention",
TORCH_FN(wrapper_CPU___native_decoder_only_multi_head_attention));
m.impl("special_bessel_j0", TORCH_FN(wrapper_CPU_special_bessel_j0));
m.impl("special_bessel_j0.out", TORCH_FN(wrapper_CPU_special_bessel_j0_out_out));
m.impl("special_bessel_j1", TORCH_FN(wrapper_CPU_special_bessel_j1));
m.impl("special_bessel_j1.out", TORCH_FN(wrapper_CPU_special_bessel_j1_out_out));
m.impl("special_bessel_y0", TORCH_FN(wrapper_CPU_special_bessel_y0));
m.impl("special_bessel_y0.out", TORCH_FN(wrapper_CPU_special_bessel_y0_out_out));
m.impl("special_bessel_y1", TORCH_FN(wrapper_CPU_special_bessel_y1));
m.impl("special_bessel_y1.out", TORCH_FN(wrapper_CPU_special_bessel_y1_out_out));
m.impl("special_chebyshev_polynomial_t", TORCH_FN(wrapper_CPU_special_chebyshev_polynomial_t));
m.impl("special_chebyshev_polynomial_t.out", TORCH_FN(wrapper_CPU_special_chebyshev_polynomial_t_out_out));
m.impl("special_chebyshev_polynomial_u", TORCH_FN(wrapper_CPU_special_chebyshev_polynomial_u));
m.impl("special_chebyshev_polynomial_u.out", TORCH_FN(wrapper_CPU_special_chebyshev_polynomial_u_out_out));
m.impl("special_chebyshev_polynomial_v", TORCH_FN(wrapper_CPU_special_chebyshev_polynomial_v));
m.impl("special_chebyshev_polynomial_v.out", TORCH_FN(wrapper_CPU_special_chebyshev_polynomial_v_out_out));
m.impl("special_chebyshev_polynomial_w", TORCH_FN(wrapper_CPU_special_chebyshev_polynomial_w));
m.impl("special_chebyshev_polynomial_w.out", TORCH_FN(wrapper_CPU_special_chebyshev_polynomial_w_out_out));
m.impl("special_hermite_polynomial_h", TORCH_FN(wrapper_CPU_special_hermite_polynomial_h));
m.impl("special_hermite_polynomial_h.out", TORCH_FN(wrapper_CPU_special_hermite_polynomial_h_out_out));
m.impl("special_hermite_polynomial_he", TORCH_FN(wrapper_CPU_special_hermite_polynomial_he));
m.impl("special_hermite_polynomial_he.out", TORCH_FN(wrapper_CPU_special_hermite_polynomial_he_out_out));
m.impl("special_laguerre_polynomial_l", TORCH_FN(wrapper_CPU_special_laguerre_polynomial_l));
m.impl("special_laguerre_polynomial_l.out", TORCH_FN(wrapper_CPU_special_laguerre_polynomial_l_out_out));
m.impl("special_legendre_polynomial_p", TORCH_FN(wrapper_CPU_special_legendre_polynomial_p));
m.impl("special_legendre_polynomial_p.out", TORCH_FN(wrapper_CPU_special_legendre_polynomial_p_out_out));
m.impl("special_modified_bessel_i0", TORCH_FN(wrapper_CPU_special_modified_bessel_i0));
m.impl("special_modified_bessel_i0.out", TORCH_FN(wrapper_CPU_special_modified_bessel_i0_out_out));
m.impl("special_modified_bessel_i1", TORCH_FN(wrapper_CPU_special_modified_bessel_i1));
m.impl("special_modified_bessel_i1.out", TORCH_FN(wrapper_CPU_special_modified_bessel_i1_out_out));
m.impl("special_modified_bessel_k0", TORCH_FN(wrapper_CPU_special_modified_bessel_k0));
m.impl("special_modified_bessel_k0.out", TORCH_FN(wrapper_CPU_special_modified_bessel_k0_out_out));
m.impl("special_modified_bessel_k1", TORCH_FN(wrapper_CPU_special_modified_bessel_k1));
m.impl("special_modified_bessel_k1.out", TORCH_FN(wrapper_CPU_special_modified_bessel_k1_out_out));
m.impl("special_scaled_modified_bessel_k0", TORCH_FN(wrapper_CPU_special_scaled_modified_bessel_k0));
m.impl("special_scaled_modified_bessel_k0.out", TORCH_FN(wrapper_CPU_special_scaled_modified_bessel_k0_out_out));
m.impl("special_scaled_modified_bessel_k1", TORCH_FN(wrapper_CPU_special_scaled_modified_bessel_k1));
m.impl("special_scaled_modified_bessel_k1.out", TORCH_FN(wrapper_CPU_special_scaled_modified_bessel_k1_out_out));
m.impl("special_shifted_chebyshev_polynomial_t", TORCH_FN(wrapper_CPU_special_shifted_chebyshev_polynomial_t));
m.impl("special_shifted_chebyshev_polynomial_t.out", TORCH_FN(wrapper_CPU_special_shifted_chebyshev_polynomial_t_out_out));
m.impl("special_shifted_chebyshev_polynomial_u", TORCH_FN(wrapper_CPU_special_shifted_chebyshev_polynomial_u));
m.impl("special_shifted_chebyshev_polynomial_u.out", TORCH_FN(wrapper_CPU_special_shifted_chebyshev_polynomial_u_out_out));
m.impl("special_shifted_chebyshev_polynomial_v", TORCH_FN(wrapper_CPU_special_shifted_chebyshev_polynomial_v));
m.impl("special_shifted_chebyshev_polynomial_v.out", TORCH_FN(wrapper_CPU_special_shifted_chebyshev_polynomial_v_out_out));
m.impl("special_shifted_chebyshev_polynomial_w", TORCH_FN(wrapper_CPU_special_shifted_chebyshev_polynomial_w));
m.impl("special_shifted_chebyshev_polynomial_w.out", TORCH_FN(wrapper_CPU_special_shifted_chebyshev_polynomial_w_out_out));
m.impl("special_spherical_bessel_j0", TORCH_FN(wrapper_CPU_special_spherical_bessel_j0));
m.impl("special_spherical_bessel_j0.out", TORCH_FN(wrapper_CPU_special_spherical_bessel_j0_out_out));
m.impl("_foobar",
TORCH_FN(wrapper_CPU___foobar));
};
} // anonymous namespace
namespace cpu {
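// The at::cpu:: functions below are thin entry points that forward straight to
// the CPU wrappers registered above, so a caller that already knows it has CPU
// tensors can invoke the kernel without going through the dispatcher again.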
void _assert_async(const at::Tensor & self) {
return wrapper_CPU___assert_async(self);
}
::std::tuple<at::Tensor,at::Tensor> native_dropout(const at::Tensor & input, double p, c10::optional<bool> train) {
return wrapper_CPU__native_dropout(input, p, train);
}
at::Tensor native_dropout_backward(const at::Tensor & grad_output, const at::Tensor & mask, double scale) {
return wrapper_CPU__native_dropout_backward(grad_output, mask, scale);
}
at::Tensor & abs_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_out_abs_out(self, out);
}
at::Tensor & abs_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_out_abs_out(self, out);
}
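// Illustrative sketch only (not part of the generated registrations): with CPU
// tensors, calling one of these entry points is expected to give the same result
// as the dispatched call. Variable names below are hypothetical.
//
//   at::Tensor self = at::randn({4});
//   at::Tensor out = at::empty_like(self);
//   at::cpu::abs_out(out, self);  // forwards directly to the CPU wrapper
//   at::abs_out(out, self);       // same result via the dispatcher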
at::Tensor angle(const at::Tensor & self) {
return wrapper_CPU__angle(self);
}
at::Tensor & angle_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_out_angle_out(self, out);
}
at::Tensor & angle_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_out_angle_out(self, out);
}
at::Tensor view_as_real(const at::Tensor & self) {
return wrapper_CPU__view_as_real(self);
}
at::Tensor view_as_complex(const at::Tensor & self) {
return wrapper_CPU__view_as_complex(self);
}
at::Tensor sgn(const at::Tensor & self) {
return wrapper_CPU_sgn(self);
}
at::Tensor & sgn_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_sgn_out_out(self, out);
}
at::Tensor & sgn_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_sgn_out_out(self, out);
}
at::Tensor & sgn_(at::Tensor & self) {
return wrapper_CPU_sgn_(self);
}
at::Tensor & conj_physical_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_out_conj_physical_out(self, out);
}
at::Tensor & conj_physical_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_out_conj_physical_out(self, out);
}
at::Tensor acos(const at::Tensor & self) {
return wrapper_CPU_acos(self);
}
at::Tensor & acos_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_acos_out_out(self, out);
}
at::Tensor & acos_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_acos_out_out(self, out);
}
at::Tensor & acos_(at::Tensor & self) {
return wrapper_CPU_acos_(self);
}
at::Tensor add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
return wrapper_CPU_add_Tensor(self, other, alpha);
}
at::Tensor & add_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
return wrapper_CPU_add_out_out(self, other, alpha, out);
}
at::Tensor & add_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
return wrapper_CPU_add_out_out(self, other, alpha, out);
}
at::Tensor & add_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
return wrapper_CPU_add__Tensor(self, other, alpha);
}
at::Tensor _add_relu(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
return wrapper_CPU_Tensor__add_relu(self, other, alpha);
}
at::Tensor & _add_relu_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
return wrapper_CPU_out__add_relu_out(self, other, alpha, out);
}
at::Tensor & _add_relu_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
return wrapper_CPU_out__add_relu_out(self, other, alpha, out);
}
at::Tensor & _add_relu_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
return wrapper_CPU_Tensor__add_relu_(self, other, alpha);
}
at::Tensor _add_relu(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
return wrapper_CPU_Scalar__add_relu(self, other, alpha);
}
at::Tensor & _add_relu_(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
return wrapper_CPU_Scalar__add_relu_(self, other, alpha);
}
at::Tensor addmv(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_CPU_addmv(self, mat, vec, beta, alpha);
}
at::Tensor & addmv_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_CPU_addmv_out_out(self, mat, vec, beta, alpha, out);
}
at::Tensor & addmv_outf(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
return wrapper_CPU_addmv_out_out(self, mat, vec, beta, alpha, out);
}
at::Tensor & addmv_(at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_CPU_addmv_(self, mat, vec, beta, alpha);
}
at::Tensor addr(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_CPU__addr(self, vec1, vec2, beta, alpha);
}
at::Tensor & addr_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_CPU_out_addr_out(self, vec1, vec2, beta, alpha, out);
}
at::Tensor & addr_outf(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
return wrapper_CPU_out_addr_out(self, vec1, vec2, beta, alpha, out);
}
at::Tensor all(const at::Tensor & self, int64_t dim, bool keepdim) {
return wrapper_CPU_all_dim(self, dim, keepdim);
}
at::Tensor & all_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool keepdim) {
return wrapper_CPU_all_out_out(self, dim, keepdim, out);
}
at::Tensor & all_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out) {
return wrapper_CPU_all_out_out(self, dim, keepdim, out);
}
at::Tensor any(const at::Tensor & self, int64_t dim, bool keepdim) {
return wrapper_CPU_any_dim(self, dim, keepdim);
}
at::Tensor & any_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool keepdim) {
return wrapper_CPU_any_out_out(self, dim, keepdim, out);
}
at::Tensor & any_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out) {
return wrapper_CPU_any_out_out(self, dim, keepdim, out);
}
at::Tensor & arange_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step) {
return wrapper_CPU_start_out_arange_out(start, end, step, out);
}
at::Tensor & arange_outf(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out) {
return wrapper_CPU_start_out_arange_out(start, end, step, out);
}
at::Tensor argmax(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {
return wrapper_CPU_argmax(self, dim, keepdim);
}
at::Tensor & argmax_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {
return wrapper_CPU_argmax_out_out(self, dim, keepdim, out);
}
at::Tensor & argmax_outf(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, at::Tensor & out) {
return wrapper_CPU_argmax_out_out(self, dim, keepdim, out);
}
at::Tensor argmin(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {
return wrapper_CPU_argmin(self, dim, keepdim);
}
at::Tensor & argmin_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {
return wrapper_CPU_argmin_out_out(self, dim, keepdim, out);
}
at::Tensor & argmin_outf(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, at::Tensor & out) {
return wrapper_CPU_argmin_out_out(self, dim, keepdim, out);
}
at::Tensor acosh(const at::Tensor & self) {
return wrapper_CPU_acosh(self);
}
at::Tensor & acosh_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_acosh_out_out(self, out);
}
at::Tensor & acosh_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_acosh_out_out(self, out);
}
at::Tensor & acosh_(at::Tensor & self) {
return wrapper_CPU_acosh_(self);
}
at::Tensor asinh(const at::Tensor & self) {
return wrapper_CPU_asinh(self);
}
at::Tensor & asinh_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_asinh_out_out(self, out);
}
at::Tensor & asinh_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_asinh_out_out(self, out);
}
at::Tensor & asinh_(at::Tensor & self) {
return wrapper_CPU_asinh_(self);
}
at::Tensor atanh(const at::Tensor & self) {
return wrapper_CPU_atanh(self);
}
at::Tensor & atanh_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_atanh_out_out(self, out);
}
at::Tensor & atanh_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_atanh_out_out(self, out);
}
at::Tensor & atanh_(at::Tensor & self) {
return wrapper_CPU_atanh_(self);
}
at::Tensor as_strided(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset) {
return wrapper_CPU__as_strided(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt);
}
at::Tensor as_strided_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {
return wrapper_CPU__as_strided(self, size, stride, storage_offset);
}
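// Note: the IntArrayRef overload of as_strided above converts its sizes and
// strides to SymInt via c10::fromIntArrayRefSlow before forwarding, while the
// _symint overload passes the symbolic values through to the wrapper unchanged.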
at::Tensor asin(const at::Tensor & self) {
return wrapper_CPU_asin(self);
}
at::Tensor & asin_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_asin_out_out(self, out);
}
at::Tensor & asin_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_asin_out_out(self, out);
}
at::Tensor & asin_(at::Tensor & self) {
return wrapper_CPU_asin_(self);
}
at::Tensor atan(const at::Tensor & self) {
return wrapper_CPU_atan(self);
}
at::Tensor & atan_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_atan_out_out(self, out);
}
at::Tensor & atan_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_atan_out_out(self, out);
}
at::Tensor & atan_(at::Tensor & self) {
return wrapper_CPU_atan_(self);
}
at::Tensor baddbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_CPU_baddbmm(self, batch1, batch2, beta, alpha);
}
at::Tensor & baddbmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_CPU_baddbmm_out_out(self, batch1, batch2, beta, alpha, out);
}
at::Tensor & baddbmm_outf(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
return wrapper_CPU_baddbmm_out_out(self, batch1, batch2, beta, alpha, out);
}
at::Tensor & baddbmm_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_CPU_baddbmm_(self, batch1, batch2, beta, alpha);
}
at::Tensor & bernoulli_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::Generator> generator) {
return wrapper_CPU_out_bernoulli_out(self, generator, out);
}
at::Tensor & bernoulli_outf(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) {
return wrapper_CPU_out_bernoulli_out(self, generator, out);
}
at::Tensor & bernoulli_(at::Tensor & self, const at::Tensor & p, c10::optional<at::Generator> generator) {
return wrapper_CPU_Tensor_bernoulli_(self, p, generator);
}
at::Tensor & bernoulli_(at::Tensor & self, double p, c10::optional<at::Generator> generator) {
return wrapper_CPU_float_bernoulli_(self, p, generator);
}
at::Tensor binary_cross_entropy(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction) {
return wrapper_CPU__binary_cross_entropy(self, target, weight, reduction);
}
at::Tensor & binary_cross_entropy_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction) {
return wrapper_CPU_out_binary_cross_entropy_out(self, target, weight, reduction, out);
}
at::Tensor & binary_cross_entropy_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out) {
return wrapper_CPU_out_binary_cross_entropy_out(self, target, weight, reduction, out);
}
at::Tensor binary_cross_entropy_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction) {
return wrapper_CPU__binary_cross_entropy_backward(grad_output, self, target, weight, reduction);
}
at::Tensor & binary_cross_entropy_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction) {
return wrapper_CPU_grad_input_binary_cross_entropy_backward_out(grad_output, self, target, weight, reduction, grad_input);
}
at::Tensor & binary_cross_entropy_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input) {
return wrapper_CPU_grad_input_binary_cross_entropy_backward_out(grad_output, self, target, weight, reduction, grad_input);
}
at::Tensor bincount(const at::Tensor & self, const c10::optional<at::Tensor> & weights, int64_t minlength) {
return wrapper_CPU__bincount(self, weights, minlength);
}
at::Tensor bitwise_not(const at::Tensor & self) {
return wrapper_CPU_bitwise_not(self);
}
at::Tensor & bitwise_not_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_bitwise_not_out_out(self, out);
}
at::Tensor & bitwise_not_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_bitwise_not_out_out(self, out);
}
at::Tensor & bitwise_not_(at::Tensor & self) {
return wrapper_CPU_bitwise_not_(self);
}
at::Tensor copysign(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_copysign_Tensor(self, other);
}
at::Tensor & copysign_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_copysign_out_out(self, other, out);
}
at::Tensor & copysign_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_copysign_out_out(self, other, out);
}
at::Tensor & copysign_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_copysign__Tensor(self, other);
}
at::Tensor & logical_not_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_out_logical_not_out(self, out);
}
at::Tensor & logical_not_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_out_logical_not_out(self, out);
}
at::Tensor & logical_xor_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_out_logical_xor_out(self, other, out);
}
at::Tensor & logical_xor_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_out_logical_xor_out(self, other, out);
}
at::Tensor & logical_and_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_out_logical_and_out(self, other, out);
}
at::Tensor & logical_and_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_out_logical_and_out(self, other, out);
}
at::Tensor & logical_or_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_out_logical_or_out(self, other, out);
}
at::Tensor & logical_or_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_out_logical_or_out(self, other, out);
}
at::Tensor bmm(const at::Tensor & self, const at::Tensor & mat2) {
return wrapper_CPU_bmm(self, mat2);
}
at::Tensor & bmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2) {
return wrapper_CPU_bmm_out_out(self, mat2, out);
}
at::Tensor & bmm_outf(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
return wrapper_CPU_bmm_out_out(self, mat2, out);
}
at::Tensor cat(const at::ITensorListRef & tensors, int64_t dim) {
return wrapper_CPU_cat(tensors, dim);
}
at::Tensor & cat_out(at::Tensor & out, const at::ITensorListRef & tensors, int64_t dim) {
return wrapper_CPU_cat_out_out(tensors, dim, out);
}
33001return wrapper_CPU_cat_out_out(tensors, dim, out);
33002}
33003at::Tensor & cat_outf(const at::ITensorListRef & tensors, int64_t dim, at::Tensor & out) {
33004return wrapper_CPU_cat_out_out(tensors, dim, out);
33005}
33006at::Tensor ceil(const at::Tensor & self) {
33007return wrapper_CPU_ceil(self);
33008}
33009at::Tensor & ceil_out(at::Tensor & out, const at::Tensor & self) {
33010return wrapper_CPU_ceil_out_out(self, out);
33011}
33012at::Tensor & ceil_outf(const at::Tensor & self, at::Tensor & out) {
33013return wrapper_CPU_ceil_out_out(self, out);
33014}
33015at::Tensor & ceil_(at::Tensor & self) {
33016return wrapper_CPU_ceil_(self);
33017}
33018at::Tensor clamp(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
33019return wrapper_CPU_clamp(self, min, max);
33020}
33021at::Tensor & clamp_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
33022return wrapper_CPU_clamp_out_out(self, min, max, out);
33023}
33024at::Tensor & clamp_outf(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max, at::Tensor & out) {
33025return wrapper_CPU_clamp_out_out(self, min, max, out);
33026}
33027at::Tensor & clamp_(at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
33028return wrapper_CPU_clamp_(self, min, max);
33029}
33030at::Tensor clamp(const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
33031return wrapper_CPU_clamp_Tensor(self, min, max);
33032}
33033at::Tensor & clamp_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
33034return wrapper_CPU_clamp_out_Tensor_out(self, min, max, out);
33035}
33036at::Tensor & clamp_outf(const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max, at::Tensor & out) {
33037return wrapper_CPU_clamp_out_Tensor_out(self, min, max, out);
33038}
33039at::Tensor & clamp_(at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
33040return wrapper_CPU_clamp__Tensor(self, min, max);
33041}
33042at::Tensor clamp_max(const at::Tensor & self, const at::Scalar & max) {
33043return wrapper_CPU_clamp_max(self, max);
33044}
33045at::Tensor & clamp_max_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & max) {
33046return wrapper_CPU_clamp_max_out_out(self, max, out);
33047}
33048at::Tensor & clamp_max_outf(const at::Tensor & self, const at::Scalar & max, at::Tensor & out) {
33049return wrapper_CPU_clamp_max_out_out(self, max, out);
33050}
33051at::Tensor & clamp_max_(at::Tensor & self, const at::Scalar & max) {
33052return wrapper_CPU_clamp_max_(self, max);
33053}
33054at::Tensor clamp_max(const at::Tensor & self, const at::Tensor & max) {
33055return wrapper_CPU_clamp_max_Tensor(self, max);
33056}
33057at::Tensor & clamp_max_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & max) {
33058return wrapper_CPU_clamp_max_out_Tensor_out(self, max, out);
33059}
33060at::Tensor & clamp_max_outf(const at::Tensor & self, const at::Tensor & max, at::Tensor & out) {
33061return wrapper_CPU_clamp_max_out_Tensor_out(self, max, out);
33062}
33063at::Tensor & clamp_max_(at::Tensor & self, const at::Tensor & max) {
33064return wrapper_CPU_clamp_max__Tensor(self, max);
33065}
33066at::Tensor clamp_min(const at::Tensor & self, const at::Scalar & min) {
33067return wrapper_CPU_clamp_min(self, min);
33068}
33069at::Tensor & clamp_min_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & min) {
33070return wrapper_CPU_clamp_min_out_out(self, min, out);
33071}
33072at::Tensor & clamp_min_outf(const at::Tensor & self, const at::Scalar & min, at::Tensor & out) {
33073return wrapper_CPU_clamp_min_out_out(self, min, out);
33074}
33075at::Tensor & clamp_min_(at::Tensor & self, const at::Scalar & min) {
33076return wrapper_CPU_clamp_min_(self, min);
33077}
33078at::Tensor clamp_min(const at::Tensor & self, const at::Tensor & min) {
33079return wrapper_CPU_clamp_min_Tensor(self, min);
33080}
33081at::Tensor & clamp_min_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & min) {
33082return wrapper_CPU_clamp_min_out_Tensor_out(self, min, out);
33083}
33084at::Tensor & clamp_min_outf(const at::Tensor & self, const at::Tensor & min, at::Tensor & out) {
33085return wrapper_CPU_clamp_min_out_Tensor_out(self, min, out);
33086}
33087at::Tensor & clamp_min_(at::Tensor & self, const at::Tensor & min) {
33088return wrapper_CPU_clamp_min__Tensor(self, min);
33089}
33090at::Tensor & complex_out(at::Tensor & out, const at::Tensor & real, const at::Tensor & imag) {
33091return wrapper_CPU_out_complex_out(real, imag, out);
33092}
33093at::Tensor & complex_outf(const at::Tensor & real, const at::Tensor & imag, at::Tensor & out) {
33094return wrapper_CPU_out_complex_out(real, imag, out);
33095}
33096at::Tensor & polar_out(at::Tensor & out, const at::Tensor & abs, const at::Tensor & angle) {
33097return wrapper_CPU_out_polar_out(abs, angle, out);
33098}
33099at::Tensor & polar_outf(const at::Tensor & abs, const at::Tensor & angle, at::Tensor & out) {
33100return wrapper_CPU_out_polar_out(abs, angle, out);
33101}
33102at::Tensor cos(const at::Tensor & self) {
33103return wrapper_CPU_cos(self);
33104}
33105at::Tensor & cos_out(at::Tensor & out, const at::Tensor & self) {
33106return wrapper_CPU_cos_out_out(self, out);
33107}
33108at::Tensor & cos_outf(const at::Tensor & self, at::Tensor & out) {
33109return wrapper_CPU_cos_out_out(self, out);
33110}
33111at::Tensor & cos_(at::Tensor & self) {
33112return wrapper_CPU_cos_(self);
33113}
33114at::Tensor cosh(const at::Tensor & self) {
33115return wrapper_CPU_cosh(self);
33116}
33117at::Tensor & cosh_out(at::Tensor & out, const at::Tensor & self) {
33118return wrapper_CPU_cosh_out_out(self, out);
33119}
33120at::Tensor & cosh_outf(const at::Tensor & self, at::Tensor & out) {
33121return wrapper_CPU_cosh_out_out(self, out);
33122}
33123at::Tensor & cosh_(at::Tensor & self) {
33124return wrapper_CPU_cosh_(self);
33125}
33126at::Tensor count_nonzero(const at::Tensor & self, at::IntArrayRef dim) {
33127return wrapper_CPU_dim_IntList_count_nonzero(self, dim);
33128}
33129void _cummax_helper(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {
33130return wrapper_CPU___cummax_helper(self, values, indices, dim);
33131}
33132void _cummin_helper(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {
33133return wrapper_CPU___cummin_helper(self, values, indices, dim);
33134}
33135at::Tensor cumprod(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
33136return wrapper_CPU_cumprod(self, dim, dtype);
33137}
33138at::Tensor & cumprod_out(at::Tensor & out, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
33139return wrapper_CPU_cumprod_out_out(self, dim, dtype, out);
33140}
33141at::Tensor & cumprod_outf(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
33142return wrapper_CPU_cumprod_out_out(self, dim, dtype, out);
33143}
33144at::Tensor & cumprod_(at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
33145return wrapper_CPU_cumprod_(self, dim, dtype);
33146}
33147at::Tensor cumsum(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
33148return wrapper_CPU_cumsum(self, dim, dtype);
33149}
33150at::Tensor & cumsum_out(at::Tensor & out, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
33151return wrapper_CPU_cumsum_out_out(self, dim, dtype, out);
33152}
33153at::Tensor & cumsum_outf(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
33154return wrapper_CPU_cumsum_out_out(self, dim, dtype, out);
33155}
33156at::Tensor & cumsum_(at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
33157return wrapper_CPU_cumsum_(self, dim, dtype);
33158}
33159::std::tuple<at::Tensor,at::Tensor> _ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool zero_infinity) {
33160return wrapper_CPU___ctc_loss(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
33161}
33162::std::tuple<at::Tensor,at::Tensor> _ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool zero_infinity) {
33163return wrapper_CPU_Tensor__ctc_loss(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
33164}
33165at::Tensor _ctc_loss_backward(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity) {
33166return wrapper_CPU___ctc_loss_backward(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity);
33167}
33168at::Tensor _ctc_loss_backward(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity) {
33169return wrapper_CPU_Tensor__ctc_loss_backward(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity);
33170}
33171at::Tensor div(const at::Tensor & self, const at::Tensor & other) {
33172return wrapper_CPU_div_Tensor(self, other);
33173}
33174at::Tensor & div_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
33175return wrapper_CPU_div_out_out(self, other, out);
33176}
33177at::Tensor & div_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
33178return wrapper_CPU_div_out_out(self, other, out);
33179}
33180at::Tensor & div_(at::Tensor & self, const at::Tensor & other) {
33181return wrapper_CPU_div__Tensor(self, other);
33182}
33183at::Tensor div(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
33184return wrapper_CPU_div_Tensor_mode(self, other, rounding_mode);
33185}
33186at::Tensor & div_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
33187return wrapper_CPU_div_out_out_mode(self, other, rounding_mode, out);
33188}
33189at::Tensor & div_outf(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) {
33190return wrapper_CPU_div_out_out_mode(self, other, rounding_mode, out);
33191}
33192at::Tensor & div_(at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
33193return wrapper_CPU_div__Tensor_mode(self, other, rounding_mode);
33194}
33195at::Tensor dot(const at::Tensor & self, const at::Tensor & tensor) {
33196return wrapper_CPU__dot(self, tensor);
33197}
33198at::Tensor vdot(const at::Tensor & self, const at::Tensor & other) {
33199return wrapper_CPU__vdot(self, other);
33200}
33201at::Tensor embedding_dense_backward(const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
33202return wrapper_CPU__embedding_dense_backward(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq);
33203}
33204at::Tensor embedding_dense_backward_symint(const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq) {
33205return wrapper_CPU__embedding_dense_backward(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq);
33206}
33207at::Tensor & embedding_renorm_(at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
33208return wrapper_CPU__embedding_renorm_(self, indices, max_norm, norm_type);
33209}
33210::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag_forward_only(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx) {
33211return wrapper_CPU___embedding_bag_forward_only(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
33212}
33213::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx) {
33214return wrapper_CPU___embedding_bag(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
33215}
33216at::Tensor _embedding_bag_dense_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
33217return wrapper_CPU___embedding_bag_dense_backward(grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
33218}
33219at::Tensor _embedding_bag_dense_backward_symint(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
33220return wrapper_CPU___embedding_bag_dense_backward(grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
33221}
33222at::Tensor _embedding_bag_per_sample_weights_backward(const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx) {
33223return wrapper_CPU___embedding_bag_per_sample_weights_backward(grad, weight, indices, offsets, offset2bag, mode, padding_idx);
33224}
33225at::Tensor empty(at::IntArrayRef size, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
33226return wrapper_CPU_memory_format_empty(c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
33227}
33228at::Tensor empty(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
33229return wrapper_CPU_memory_format_empty(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory, memory_format);
33230}
33231at::Tensor empty_symint(c10::SymIntArrayRef size, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
33232return wrapper_CPU_memory_format_empty(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
33233}
33234at::Tensor empty_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
33235return wrapper_CPU_memory_format_empty(size, dtype, layout, device, pin_memory, memory_format);
33236}
33237at::Tensor _empty_affine_quantized(at::IntArrayRef size, at::TensorOptions options, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format) {
33238return wrapper_CPU___empty_affine_quantized(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), scale, zero_point, c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
33239}
33240at::Tensor _empty_affine_quantized(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format) {
33241return wrapper_CPU___empty_affine_quantized(size, dtype, layout, device, pin_memory, scale, zero_point, memory_format);
33242}
33243at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::TensorOptions options, c10::optional<at::MemoryFormat> memory_format) {
33244return wrapper_CPU___empty_per_channel_affine_quantized(size, scales, zero_points, axis, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
33245}
33246at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
33247return wrapper_CPU___empty_per_channel_affine_quantized(size, scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format);
33248}
33249const at::Tensor & resize_(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
33250return wrapper_CPU__resize_(self, c10::fromIntArrayRefSlow(size), memory_format);
33251}
33252const at::Tensor & resize__symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
33253return wrapper_CPU__resize_(self, size, memory_format);
33254}
33255at::Tensor empty_strided(at::IntArrayRef size, at::IntArrayRef stride, at::TensorOptions options) {
33256return wrapper_CPU__empty_strided(c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
33257}
33258at::Tensor empty_strided(at::IntArrayRef size, at::IntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
33259return wrapper_CPU__empty_strided(c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), dtype, layout, device, pin_memory);
33260}
33261at::Tensor empty_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::TensorOptions options) {
33262return wrapper_CPU__empty_strided(size, stride, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
33263}
33264at::Tensor empty_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
33265return wrapper_CPU__empty_strided(size, stride, dtype, layout, device, pin_memory);
33266}
33267at::Tensor erf(const at::Tensor & self) {
33268return wrapper_CPU_erf(self);
33269}
33270at::Tensor & erf_out(at::Tensor & out, const at::Tensor & self) {
33271return wrapper_CPU_erf_out_out(self, out);
33272}
33273at::Tensor & erf_outf(const at::Tensor & self, at::Tensor & out) {
33274return wrapper_CPU_erf_out_out(self, out);
33275}
33276at::Tensor & erf_(at::Tensor & self) {
33277return wrapper_CPU_erf_(self);
33278}
33279at::Tensor erfc(const at::Tensor & self) {
33280return wrapper_CPU_erfc(self);
33281}
33282at::Tensor & erfc_out(at::Tensor & out, const at::Tensor & self) {
33283return wrapper_CPU_erfc_out_out(self, out);
33284}
33285at::Tensor & erfc_outf(const at::Tensor & self, at::Tensor & out) {
33286return wrapper_CPU_erfc_out_out(self, out);
33287}
33288at::Tensor & erfc_(at::Tensor & self) {
33289return wrapper_CPU_erfc_(self);
33290}
33291at::Tensor exp(const at::Tensor & self) {
33292return wrapper_CPU_exp(self);
33293}
33294at::Tensor & exp_out(at::Tensor & out, const at::Tensor & self) {
33295return wrapper_CPU_exp_out_out(self, out);
33296}
33297at::Tensor & exp_outf(const at::Tensor & self, at::Tensor & out) {
33298return wrapper_CPU_exp_out_out(self, out);
33299}
33300at::Tensor & exp_(at::Tensor & self) {
33301return wrapper_CPU_exp_(self);
33302}
33303at::Tensor exp2(const at::Tensor & self) {
33304return wrapper_CPU_exp2(self);
33305}
33306at::Tensor & exp2_out(at::Tensor & out, const at::Tensor & self) {
33307return wrapper_CPU_exp2_out_out(self, out);
33308}
33309at::Tensor & exp2_outf(const at::Tensor & self, at::Tensor & out) {
33310return wrapper_CPU_exp2_out_out(self, out);
33311}
33312at::Tensor & exp2_(at::Tensor & self) {
33313return wrapper_CPU_exp2_(self);
33314}
33315at::Tensor expm1(const at::Tensor & self) {
33316return wrapper_CPU_expm1(self);
33317}
33318at::Tensor & expm1_out(at::Tensor & out, const at::Tensor & self) {
33319return wrapper_CPU_expm1_out_out(self, out);
33320}
33321at::Tensor & expm1_outf(const at::Tensor & self, at::Tensor & out) {
33322return wrapper_CPU_expm1_out_out(self, out);
33323}
33324at::Tensor & expm1_(at::Tensor & self) {
33325return wrapper_CPU_expm1_(self);
33326}
33327at::Tensor & eye_out(at::Tensor & out, int64_t n) {
33328return wrapper_CPU_out_eye_out(n, out);
33329}
33330at::Tensor & eye_outf(int64_t n, at::Tensor & out) {
33331return wrapper_CPU_out_eye_out(n, out);
33332}
33333at::Tensor & eye_out(at::Tensor & out, int64_t n, int64_t m) {
33334return wrapper_CPU_m_out_eye_out(n, m, out);
33335}
33336at::Tensor & eye_outf(int64_t n, int64_t m, at::Tensor & out) {
33337return wrapper_CPU_m_out_eye_out(n, m, out);
33338}
33339at::Tensor & fill_(at::Tensor & self, const at::Scalar & value) {
33340return wrapper_CPU_Scalar_fill_(self, value);
33341}
33342at::Tensor & fill_(at::Tensor & self, const at::Tensor & value) {
33343return wrapper_CPU_Tensor_fill_(self, value);
33344}
33345at::Tensor floor(const at::Tensor & self) {
33346return wrapper_CPU_floor(self);
33347}
33348at::Tensor & floor_out(at::Tensor & out, const at::Tensor & self) {
33349return wrapper_CPU_floor_out_out(self, out);
33350}
33351at::Tensor & floor_outf(const at::Tensor & self, at::Tensor & out) {
33352return wrapper_CPU_floor_out_out(self, out);
33353}
33354at::Tensor & floor_(at::Tensor & self) {
33355return wrapper_CPU_floor_(self);
33356}
33357at::Tensor floor_divide(const at::Tensor & self, const at::Tensor & other) {
33358return wrapper_CPU__floor_divide(self, other);
33359}
33360at::Tensor & floor_divide_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
33361return wrapper_CPU_out_floor_divide_out(self, other, out);
33362}
33363at::Tensor & floor_divide_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
33364return wrapper_CPU_out_floor_divide_out(self, other, out);
33365}
33366at::Tensor & floor_divide_(at::Tensor & self, const at::Tensor & other) {
33367return wrapper_CPU_Tensor_floor_divide_(self, other);
33368}
33369at::Tensor frac(const at::Tensor & self) {
33370return wrapper_CPU_frac(self);
33371}
33372at::Tensor & frac_out(at::Tensor & out, const at::Tensor & self) {
33373return wrapper_CPU_frac_out_out(self, out);
33374}
33375at::Tensor & frac_outf(const at::Tensor & self, at::Tensor & out) {
33376return wrapper_CPU_frac_out_out(self, out);
33377}
33378at::Tensor & frac_(at::Tensor & self) {
33379return wrapper_CPU_frac_(self);
33380}
33381at::Tensor from_file(c10::string_view filename, c10::optional<bool> shared, c10::optional<int64_t> size, at::TensorOptions options) {
33382return wrapper_CPU__from_file(filename, shared, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
33383}
33384at::Tensor from_file(c10::string_view filename, c10::optional<bool> shared, c10::optional<int64_t> size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
33385return wrapper_CPU__from_file(filename, shared, size, dtype, layout, device, pin_memory);
33386}
33387at::Tensor gcd(const at::Tensor & self, const at::Tensor & other) {
33388return wrapper_CPU_gcd(self, other);
33389}
33390at::Tensor & gcd_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
33391return wrapper_CPU_gcd_out_out(self, other, out);
33392}
33393at::Tensor & gcd_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
33394return wrapper_CPU_gcd_out_out(self, other, out);
33395}
33396at::Tensor & gcd_(at::Tensor & self, const at::Tensor & other) {
33397return wrapper_CPU_gcd_(self, other);
33398}
33399at::Tensor lcm(const at::Tensor & self, const at::Tensor & other) {
33400return wrapper_CPU_lcm(self, other);
33401}
33402at::Tensor & lcm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
33403return wrapper_CPU_lcm_out_out(self, other, out);
33404}
33405at::Tensor & lcm_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
33406return wrapper_CPU_lcm_out_out(self, other, out);
33407}
33408at::Tensor & lcm_(at::Tensor & self, const at::Tensor & other) {
33409return wrapper_CPU_lcm_(self, other);
33410}
33411at::Tensor grid_sampler_2d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
33412return wrapper_CPU__grid_sampler_2d(input, grid, interpolation_mode, padding_mode, align_corners);
33413}
33414::std::tuple<at::Tensor,at::Tensor> grid_sampler_2d_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
33415return wrapper_CPU__grid_sampler_2d_backward(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask);
33416}
33417at::Tensor grid_sampler_3d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
33418return wrapper_CPU__grid_sampler_3d(input, grid, interpolation_mode, padding_mode, align_corners);
33419}
33420::std::tuple<at::Tensor,at::Tensor> grid_sampler_3d_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
33421return wrapper_CPU__grid_sampler_3d_backward(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask);
33422}
33423::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps) {
33424return wrapper_CPU__native_group_norm(input, weight, bias, N, C, HxW, group, eps);
33425}
33426::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_symint(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps) {
33427return wrapper_CPU__native_group_norm(input, weight, bias, N, C, HxW, group, eps);
33428}
33429::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, int64_t N, int64_t C, int64_t HxW, int64_t group, ::std::array<bool,3> output_mask) {
33430return wrapper_CPU__native_group_norm_backward(grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask);
33431}
33432::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_backward_symint(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask) {
33433return wrapper_CPU__native_group_norm_backward(grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask);
33434}
33435at::Tensor _fft_r2c(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided) {
33436return wrapper_CPU___fft_r2c(self, dim, normalization, onesided);
33437}
33438at::Tensor & _fft_r2c_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided) {
33439return wrapper_CPU_out__fft_r2c_out(self, dim, normalization, onesided, out);
33440}
33441at::Tensor & _fft_r2c_outf(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided, at::Tensor & out) {
33442return wrapper_CPU_out__fft_r2c_out(self, dim, normalization, onesided, out);
33443}
33444at::Tensor _fft_c2r(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size) {
33445return wrapper_CPU___fft_c2r(self, dim, normalization, last_dim_size);
33446}
33447at::Tensor & _fft_c2r_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size) {
33448return wrapper_CPU_out__fft_c2r_out(self, dim, normalization, last_dim_size, out);
33449}
33450at::Tensor & _fft_c2r_outf(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size, at::Tensor & out) {
33451return wrapper_CPU_out__fft_c2r_out(self, dim, normalization, last_dim_size, out);
33452}
33453at::Tensor _fft_c2c(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward) {
33454return wrapper_CPU___fft_c2c(self, c10::fromIntArrayRefSlow(dim), normalization, forward);
33455}
33456at::Tensor _fft_c2c_symint(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward) {
33457return wrapper_CPU___fft_c2c(self, dim, normalization, forward);
33458}
33459at::Tensor & _fft_c2c_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward) {
33460return wrapper_CPU_out__fft_c2c_out(self, c10::fromIntArrayRefSlow(dim), normalization, forward, out);
33461}
33462at::Tensor & _fft_c2c_outf(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward, at::Tensor & out) {
33463return wrapper_CPU_out__fft_c2c_out(self, c10::fromIntArrayRefSlow(dim), normalization, forward, out);
33464}
33465at::Tensor & _fft_c2c_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward) {
33466return wrapper_CPU_out__fft_c2c_out(self, dim, normalization, forward, out);
33467}
33468at::Tensor & _fft_c2c_symint_outf(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward, at::Tensor & out) {
33469return wrapper_CPU_out__fft_c2c_out(self, dim, normalization, forward, out);
33470}
33471void _validate_compressed_sparse_indices(bool is_crow, const at::Tensor & compressed_idx, const at::Tensor & plain_idx, int64_t cdim, int64_t dim, int64_t nnz) {
33472return wrapper_CPU___validate_compressed_sparse_indices(is_crow, compressed_idx, plain_idx, cdim, dim, nnz);
33473}
33474at::Tensor index(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices) {
33475return wrapper_CPU_index_Tensor(self, indices);
33476}
33477at::Tensor & index_out(at::Tensor & out, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices) {
33478return wrapper_CPU_index_out_Tensor_out(self, indices, out);
33479}
33480at::Tensor & index_outf(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, at::Tensor & out) {
33481return wrapper_CPU_index_out_Tensor_out(self, indices, out);
33482}
33483at::Tensor index_copy(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
33484return wrapper_CPU_index_copy(self, dim, index, source);
33485}
33486at::Tensor & index_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
33487return wrapper_CPU_index_copy_out_out(self, dim, index, source, out);
33488}
33489at::Tensor & index_copy_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, at::Tensor & out) {
33490return wrapper_CPU_index_copy_out_out(self, dim, index, source, out);
33491}
33492at::Tensor & index_copy_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
33493return wrapper_CPU_index_copy_(self, dim, index, source);
33494}
33495at::Tensor & _index_put_impl_(at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) {
33496return wrapper_CPU___index_put_impl_(self, indices, values, accumulate, unsafe);
33497}
33498at::Tensor isin(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert) {
33499return wrapper_CPU_isin_Tensor_Tensor(elements, test_elements, assume_unique, invert);
33500}
33501at::Tensor & isin_out(at::Tensor & out, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert) {
33502return wrapper_CPU_isin_out_Tensor_Tensor_out(elements, test_elements, assume_unique, invert, out);
33503}
33504at::Tensor & isin_outf(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) {
33505return wrapper_CPU_isin_out_Tensor_Tensor_out(elements, test_elements, assume_unique, invert, out);
33506}
33507at::Tensor isin(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert) {
33508return wrapper_CPU_isin_Tensor_Scalar(elements, test_element, assume_unique, invert);
33509}
33510at::Tensor & isin_out(at::Tensor & out, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert) {
33511return wrapper_CPU_isin_out_Tensor_Scalar_out(elements, test_element, assume_unique, invert, out);
33512}
33513at::Tensor & isin_outf(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, at::Tensor & out) {
33514return wrapper_CPU_isin_out_Tensor_Scalar_out(elements, test_element, assume_unique, invert, out);
33515}
33516at::Tensor isin(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert) {
33517return wrapper_CPU_isin_Scalar_Tensor(element, test_elements, assume_unique, invert);
33518}
33519at::Tensor & isin_out(at::Tensor & out, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert) {
33520return wrapper_CPU_isin_out_Scalar_Tensor_out(element, test_elements, assume_unique, invert, out);
33521}
33522at::Tensor & isin_outf(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) {
33523return wrapper_CPU_isin_out_Scalar_Tensor_out(element, test_elements, assume_unique, invert, out);
33524}
33525at::Tensor isnan(const at::Tensor & self) {
33526return wrapper_CPU__isnan(self);
33527}
33528::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, int64_t dim, bool keepdim) {
33529return wrapper_CPU_values_kthvalue_out(self, k, dim, keepdim, values, indices);
33530}
33531::std::tuple<at::Tensor &,at::Tensor &> kthvalue_outf(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
33532return wrapper_CPU_values_kthvalue_out(self, k, dim, keepdim, values, indices);
33533}
33534::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) {
33535return wrapper_CPU__native_layer_norm(input, c10::fromIntArrayRefSlow(normalized_shape), weight, bias, eps);
33536}
33537::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_symint(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) {
33538return wrapper_CPU__native_layer_norm(input, normalized_shape, weight, bias, eps);
33539}
33540::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
33541return wrapper_CPU__native_layer_norm_backward(grad_out, input, c10::fromIntArrayRefSlow(normalized_shape), mean, rstd, weight, bias, output_mask);
33542}
33543::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward_symint(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
33544return wrapper_CPU__native_layer_norm_backward(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask);
33545}
33546at::Tensor & nan_to_num_out(at::Tensor & out, const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) {
33547return wrapper_CPU_out_nan_to_num_out(self, nan, posinf, neginf, out);
33548}
33549at::Tensor & nan_to_num_outf(const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf, at::Tensor & out) {
33550return wrapper_CPU_out_nan_to_num_out(self, nan, posinf, neginf, out);
33551}
33552at::Tensor & linspace_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end, int64_t steps) {
33553return wrapper_CPU_out_linspace_out(start, end, steps, out);
33554}
33555at::Tensor & linspace_outf(const at::Scalar & start, const at::Scalar & end, int64_t steps, at::Tensor & out) {
33556return wrapper_CPU_out_linspace_out(start, end, steps, out);
33557}
33558at::Tensor log(const at::Tensor & self) {
33559return wrapper_CPU_log(self);
33560}
33561at::Tensor & log_out(at::Tensor & out, const at::Tensor & self) {
33562return wrapper_CPU_log_out_out(self, out);
33563}
33564at::Tensor & log_outf(const at::Tensor & self, at::Tensor & out) {
33565return wrapper_CPU_log_out_out(self, out);
33566}
33567at::Tensor & log_(at::Tensor & self) {
33568return wrapper_CPU_log_(self);
33569}
33570at::Tensor log10(const at::Tensor & self) {
33571return wrapper_CPU_log10(self);
33572}
33573at::Tensor & log10_out(at::Tensor & out, const at::Tensor & self) {
33574return wrapper_CPU_log10_out_out(self, out);
33575}
33576at::Tensor & log10_outf(const at::Tensor & self, at::Tensor & out) {
33577return wrapper_CPU_log10_out_out(self, out);
33578}
33579at::Tensor & log10_(at::Tensor & self) {
33580return wrapper_CPU_log10_(self);
33581}
33582at::Tensor log1p(const at::Tensor & self) {
33583return wrapper_CPU_log1p(self);
33584}
33585at::Tensor & log1p_out(at::Tensor & out, const at::Tensor & self) {
33586return wrapper_CPU_log1p_out_out(self, out);
33587}
33588at::Tensor & log1p_outf(const at::Tensor & self, at::Tensor & out) {
33589return wrapper_CPU_log1p_out_out(self, out);
33590}
33591at::Tensor & log1p_(at::Tensor & self) {
33592return wrapper_CPU_log1p_(self);
33593}
33594at::Tensor log2(const at::Tensor & self) {
33595return wrapper_CPU_log2(self);
33596}
33597at::Tensor & log2_out(at::Tensor & out, const at::Tensor & self) {
33598return wrapper_CPU_log2_out_out(self, out);
33599}
33600at::Tensor & log2_outf(const at::Tensor & self, at::Tensor & out) {
33601return wrapper_CPU_log2_out_out(self, out);
33602}
33603at::Tensor & log2_(at::Tensor & self) {
33604return wrapper_CPU_log2_(self);
33605}
33606at::Tensor logaddexp(const at::Tensor & self, const at::Tensor & other) {
33607return wrapper_CPU_logaddexp(self, other);
33608}
33609at::Tensor & logaddexp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
33610return wrapper_CPU_logaddexp_out_out(self, other, out);
33611}
33612at::Tensor & logaddexp_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
33613return wrapper_CPU_logaddexp_out_out(self, other, out);
33614}
33615at::Tensor logaddexp2(const at::Tensor & self, const at::Tensor & other) {
33616return wrapper_CPU_logaddexp2(self, other);
33617}
33618at::Tensor & logaddexp2_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
33619return wrapper_CPU_logaddexp2_out_out(self, other, out);
33620}
33621at::Tensor & logaddexp2_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
33622return wrapper_CPU_logaddexp2_out_out(self, other, out);
33623}
33624at::Tensor xlogy(const at::Tensor & self, const at::Tensor & other) {
33625return wrapper_CPU_xlogy_Tensor(self, other);
33626}
33627at::Tensor & xlogy_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
33628return wrapper_CPU_xlogy_out_OutTensor(self, other, out);
33629}
33630at::Tensor & xlogy_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
33631return wrapper_CPU_xlogy_out_OutTensor(self, other, out);
33632}
33633at::Tensor & xlogy_(at::Tensor & self, const at::Tensor & other) {
33634return wrapper_CPU_xlogy__Tensor(self, other);
33635}
33636at::Tensor & logspace_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end, int64_t steps, double base) {
33637return wrapper_CPU_out_logspace_out(start, end, steps, base, out);
33638}
33639at::Tensor & logspace_outf(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out) {
33640return wrapper_CPU_out_logspace_out(start, end, steps, base, out);
33641}
33642at::Tensor _log_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
33643return wrapper_CPU__log_softmax(self, dim, half_to_float);
33644}
33645at::Tensor & _log_softmax_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool half_to_float) {
33646return wrapper_CPU__log_softmax_out_out(self, dim, half_to_float, out);
33647}
33648at::Tensor & _log_softmax_outf(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
33649return wrapper_CPU__log_softmax_out_out(self, dim, half_to_float, out);
33650}
33651at::Tensor _log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
33652return wrapper_CPU__log_softmax_backward_data(grad_output, output, dim, input_dtype);
33653}
33654at::Tensor & _log_softmax_backward_data_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
33655return wrapper_CPU__log_softmax_backward_data_out_out(grad_output, output, dim, input_dtype, out);
33656}
33657at::Tensor & _log_softmax_backward_data_outf(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & out) {
33658return wrapper_CPU__log_softmax_backward_data_out_out(grad_output, output, dim, input_dtype, out);
33659}
33660at::Tensor _logcumsumexp(const at::Tensor & self, int64_t dim) {
33661return wrapper_CPU___logcumsumexp(self, dim);
33662}
33663at::Tensor & _logcumsumexp_out(at::Tensor & out, const at::Tensor & self, int64_t dim) {
33664return wrapper_CPU_out__logcumsumexp_out(self, dim, out);
33665}
33666at::Tensor & _logcumsumexp_outf(const at::Tensor & self, int64_t dim, at::Tensor & out) {
33667return wrapper_CPU_out__logcumsumexp_out(self, dim, out);
33668}
33669::std::tuple<at::Tensor,at::Tensor> _aminmax(const at::Tensor & self) {
33670return wrapper_CPU___aminmax(self);
33671}
33672::std::tuple<at::Tensor,at::Tensor> _aminmax(const at::Tensor & self, int64_t dim, bool keepdim) {
33673return wrapper_CPU_dim__aminmax(self, dim, keepdim);
33674}
33675::std::tuple<at::Tensor,at::Tensor> aminmax(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {
33676return wrapper_CPU_aminmax(self, dim, keepdim);
33677}
33678::std::tuple<at::Tensor &,at::Tensor &> aminmax_out(at::Tensor & min, at::Tensor & max, const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {
33679return wrapper_CPU_aminmax_out_out(self, dim, keepdim, min, max);
33680}
33681::std::tuple<at::Tensor &,at::Tensor &> aminmax_outf(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, at::Tensor & min, at::Tensor & max) {
33682return wrapper_CPU_aminmax_out_out(self, dim, keepdim, min, max);
33683}
33684at::Tensor _compute_linear_combination(const at::Tensor & input, const at::Tensor & coefficients) {
33685return wrapper_CPU___compute_linear_combination(input, coefficients);
33686}
33687at::Tensor & _compute_linear_combination_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & coefficients) {
33688return wrapper_CPU_out__compute_linear_combination_out(input, coefficients, out);
33689}
33690at::Tensor & _compute_linear_combination_outf(const at::Tensor & input, const at::Tensor & coefficients, at::Tensor & out) {
33691return wrapper_CPU_out__compute_linear_combination_out(input, coefficients, out);
33692}
33693::std::tuple<at::Tensor,at::Tensor> max(const at::Tensor & self, int64_t dim, bool keepdim) {
33694return wrapper_CPU_max_dim(self, dim, keepdim);
33695}
33696::std::tuple<at::Tensor &,at::Tensor &> max_out(at::Tensor & max, at::Tensor & max_values, const at::Tensor & self, int64_t dim, bool keepdim) {
33697return wrapper_CPU_max_out_dim_max(self, dim, keepdim, max, max_values);
33698}
33699::std::tuple<at::Tensor &,at::Tensor &> max_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) {
33700return wrapper_CPU_max_out_dim_max(self, dim, keepdim, max, max_values);
33701}
33702at::Tensor amax(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
33703return wrapper_CPU_amax(self, dim, keepdim);
33704}
33705at::Tensor & amax_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
33706return wrapper_CPU_amax_out_out(self, dim, keepdim, out);
33707}
33708at::Tensor & amax_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
33709return wrapper_CPU_amax_out_out(self, dim, keepdim, out);
33710}
33711at::Tensor mean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
33712return wrapper_CPU_mean_dim(self, dim, keepdim, dtype);
33713}
33714at::Tensor & mean_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
33715return wrapper_CPU_mean_out_out(self, dim, keepdim, dtype, out);
33716}
33717at::Tensor & mean_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
33718return wrapper_CPU_mean_out_out(self, dim, keepdim, dtype, out);
33719}
33720at::Tensor median(const at::Tensor & self) {
33721return wrapper_CPU__median(self);
33722}
33723::std::tuple<at::Tensor &,at::Tensor &> median_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim, bool keepdim) {
33724return wrapper_CPU_dim_values_median_out(self, dim, keepdim, values, indices);
33725}
33726::std::tuple<at::Tensor &,at::Tensor &> median_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
33727return wrapper_CPU_dim_values_median_out(self, dim, keepdim, values, indices);
33728}
33729at::Tensor nanmedian(const at::Tensor & self) {
33730return wrapper_CPU__nanmedian(self);
33731}
33732::std::tuple<at::Tensor &,at::Tensor &> nanmedian_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim, bool keepdim) {
33733return wrapper_CPU_dim_values_nanmedian_out(self, dim, keepdim, values, indices);
33734}
33735::std::tuple<at::Tensor &,at::Tensor &> nanmedian_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
33736return wrapper_CPU_dim_values_nanmedian_out(self, dim, keepdim, values, indices);
33737}
33738::std::tuple<at::Tensor,at::Tensor> min(const at::Tensor & self, int64_t dim, bool keepdim) {
33739return wrapper_CPU_min_dim(self, dim, keepdim);
33740}
33741::std::tuple<at::Tensor &,at::Tensor &> min_out(at::Tensor & min, at::Tensor & min_indices, const at::Tensor & self, int64_t dim, bool keepdim) {
33742return wrapper_CPU_min_out_dim_min(self, dim, keepdim, min, min_indices);
33743}
33744::std::tuple<at::Tensor &,at::Tensor &> min_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices) {
33745return wrapper_CPU_min_out_dim_min(self, dim, keepdim, min, min_indices);
33746}
33747at::Tensor amin(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
33748return wrapper_CPU_amin(self, dim, keepdim);
33749}
33750at::Tensor & amin_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
33751return wrapper_CPU_amin_out_out(self, dim, keepdim, out);
33752}
33753at::Tensor & amin_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
33754return wrapper_CPU_amin_out_out(self, dim, keepdim, out);
33755}
33756::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> mkldnn_rnn_layer(const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) {
33757return wrapper_CPU__mkldnn_rnn_layer(input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train);
33758}
33759::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> mkldnn_rnn_layer_backward(const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace) {
33760return wrapper_CPU__mkldnn_rnn_layer_backward(input, weight1, weight2, weight3, weight4, hx_, cx_tmp, output, hy_, cy_, grad_output, grad_hy, grad_cy, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace);
33761}
33762at::Tensor mm(const at::Tensor & self, const at::Tensor & mat2) {
33763return wrapper_CPU_mm(self, mat2);
33764}
33765at::Tensor & mm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2) {
33766return wrapper_CPU_mm_out_out(self, mat2, out);
33767}
33768at::Tensor & mm_outf(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
33769return wrapper_CPU_mm_out_out(self, mat2, out);
33770}
33771::std::tuple<at::Tensor,at::Tensor> mode(const at::Tensor & self, int64_t dim, bool keepdim) {
33772return wrapper_CPU__mode(self, dim, keepdim);
33773}
33774at::Tensor mul(const at::Tensor & self, const at::Tensor & other) {
33775return wrapper_CPU_mul_Tensor(self, other);
33776}
33777at::Tensor & mul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
33778return wrapper_CPU_mul_out_out(self, other, out);
33779}
33780at::Tensor & mul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
33781return wrapper_CPU_mul_out_out(self, other, out);
33782}
33783at::Tensor & mul_(at::Tensor & self, const at::Tensor & other) {
33784return wrapper_CPU_mul__Tensor(self, other);
33785}
33786at::Tensor & mvlgamma_out(at::Tensor & out, const at::Tensor & self, int64_t p) {
33787return wrapper_CPU_out_mvlgamma_out(self, p, out);
33788}
33789at::Tensor & mvlgamma_outf(const at::Tensor & self, int64_t p, at::Tensor & out) {
33790return wrapper_CPU_out_mvlgamma_out(self, p, out);
33791}
33792at::Tensor narrow_copy(const at::Tensor & self, int64_t dim, int64_t start, int64_t length) {
33793return wrapper_CPU__narrow_copy(self, dim, start, length);
33794}
33795at::Tensor narrow_copy_symint(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
33796return wrapper_CPU__narrow_copy(self, dim, start, length);
33797}
33798at::Tensor & narrow_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim, int64_t start, int64_t length) {
33799return wrapper_CPU_out_narrow_copy_out(self, dim, start, length, out);
33800}
33801at::Tensor & narrow_copy_outf(const at::Tensor & self, int64_t dim, int64_t start, int64_t length, at::Tensor & out) {
33802return wrapper_CPU_out_narrow_copy_out(self, dim, start, length, out);
33803}
33804at::Tensor & narrow_copy_symint_out(at::Tensor & out, const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
33805return wrapper_CPU_out_narrow_copy_out(self, dim, start, length, out);
33806}
33807at::Tensor & narrow_copy_symint_outf(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length, at::Tensor & out) {
33808return wrapper_CPU_out_narrow_copy_out(self, dim, start, length, out);
33809}
33810::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_batch_norm(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps) {
33811return wrapper_CPU__native_batch_norm(input, weight, bias, running_mean, running_var, training, momentum, eps);
33812}
33813::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_batch_norm_out(at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps) {
33814return wrapper_CPU_out_native_batch_norm_out(input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd);
33815}
33816::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_batch_norm_outf(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
33817return wrapper_CPU_out_native_batch_norm_out(input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd);
33818}
33819::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_out(at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps) {
33820return wrapper_CPU_out__native_batch_norm_legit_out(input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd);
33821}
33822::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_outf(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
33823return wrapper_CPU_out__native_batch_norm_legit_out(input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd);
33824}
33825::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps) {
33826return wrapper_CPU___native_batch_norm_legit(input, weight, bias, running_mean, running_var, training, momentum, eps);
33827}
33828::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, bool training, double momentum, double eps) {
33829return wrapper_CPU_no_stats__native_batch_norm_legit(input, weight, bias, training, momentum, eps);
33830}
33831::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_out(at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, bool training, double momentum, double eps) {
33832return wrapper_CPU_no_stats_out__native_batch_norm_legit_out(input, weight, bias, training, momentum, eps, out, save_mean, save_invstd);
33833}
33834::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_outf(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
33835return wrapper_CPU_no_stats_out__native_batch_norm_legit_out(input, weight, bias, training, momentum, eps, out, save_mean, save_invstd);
33836}
33837::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_batch_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask) {
33838return wrapper_CPU__native_batch_norm_backward(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask);
33839}
33840::std::tuple<at::Tensor,at::Tensor> batch_norm_update_stats(const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum) {
33841return wrapper_CPU__batch_norm_update_stats(input, running_mean, running_var, momentum);
33842}
33843at::Tensor _cdist_forward(const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode) {
33844return wrapper_CPU___cdist_forward(x1, x2, p, compute_mode);
33845}
33846at::Tensor _cdist_backward(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist) {
33847return wrapper_CPU___cdist_backward(grad, x1, x2, p, cdist);
33848}
33849at::Tensor _pdist_forward(const at::Tensor & self, double p) {
33850return wrapper_CPU___pdist_forward(self, p);
33851}
33852at::Tensor _pdist_backward(const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist) {
33853return wrapper_CPU___pdist_backward(grad, self, p, pdist);
33854}
33855at::Tensor pixel_shuffle(const at::Tensor & self, int64_t upscale_factor) {
33856return wrapper_CPU__pixel_shuffle(self, upscale_factor);
33857}
33858at::Tensor pixel_unshuffle(const at::Tensor & self, int64_t downscale_factor) {
33859return wrapper_CPU__pixel_unshuffle(self, downscale_factor);
33860}
33861at::Tensor channel_shuffle(const at::Tensor & self, int64_t groups) {
33862return wrapper_CPU__channel_shuffle(self, groups);
33863}
33864at::Tensor native_channel_shuffle(const at::Tensor & self, int64_t groups) {
33865return wrapper_CPU__native_channel_shuffle(self, groups);
33866}
33867at::Tensor & randperm_out(at::Tensor & out, int64_t n, c10::optional<at::Generator> generator) {
33868return wrapper_CPU_generator_out_randperm_out(n, generator, out);
33869}
33870at::Tensor & randperm_outf(int64_t n, c10::optional<at::Generator> generator, at::Tensor & out) {
33871return wrapper_CPU_generator_out_randperm_out(n, generator, out);
33872}
33873at::Tensor & range_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step) {
33874return wrapper_CPU_out_range_out(start, end, step, out);
33875}
33876at::Tensor & range_outf(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out) {
33877return wrapper_CPU_out_range_out(start, end, step, out);
33878}
33879at::Tensor reciprocal(const at::Tensor & self) {
33880return wrapper_CPU_reciprocal(self);
33881}
33882at::Tensor & reciprocal_out(at::Tensor & out, const at::Tensor & self) {
33883return wrapper_CPU_reciprocal_out_out(self, out);
33884}
33885at::Tensor & reciprocal_outf(const at::Tensor & self, at::Tensor & out) {
33886return wrapper_CPU_reciprocal_out_out(self, out);
33887}
33888at::Tensor & reciprocal_(at::Tensor & self) {
33889return wrapper_CPU_reciprocal_(self);
33890}
33891at::Tensor neg(const at::Tensor & self) {
33892return wrapper_CPU_neg(self);
33893}
33894at::Tensor & neg_out(at::Tensor & out, const at::Tensor & self) {
33895return wrapper_CPU_neg_out_out(self, out);
33896}
33897at::Tensor & neg_outf(const at::Tensor & self, at::Tensor & out) {
33898return wrapper_CPU_neg_out_out(self, out);
33899}
33900at::Tensor & neg_(at::Tensor & self) {
33901return wrapper_CPU_neg_(self);
33902}
33903at::Tensor repeat_interleave(const at::Tensor & repeats, c10::optional<int64_t> output_size) {
33904return wrapper_CPU_Tensor_repeat_interleave(repeats, output_size);
33905}
33906at::Tensor _reshape_alias(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) {
33907return wrapper_CPU___reshape_alias(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
33908}
33909at::Tensor _reshape_alias_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
33910return wrapper_CPU___reshape_alias(self, size, stride);
33911}
33912at::Tensor round(const at::Tensor & self) {
33913return wrapper_CPU_round(self);
33914}
33915at::Tensor & round_out(at::Tensor & out, const at::Tensor & self) {
33916return wrapper_CPU_round_out_out(self, out);
33917}
33918at::Tensor & round_outf(const at::Tensor & self, at::Tensor & out) {
33919return wrapper_CPU_round_out_out(self, out);
33920}
33921at::Tensor & round_(at::Tensor & self) {
33922return wrapper_CPU_round_(self);
33923}
33924at::Tensor round(const at::Tensor & self, int64_t decimals) {
33925return wrapper_CPU_round_decimals(self, decimals);
33926}
33927at::Tensor & round_out(at::Tensor & out, const at::Tensor & self, int64_t decimals) {
33928return wrapper_CPU_round_out_decimals_out(self, decimals, out);
33929}
33930at::Tensor & round_outf(const at::Tensor & self, int64_t decimals, at::Tensor & out) {
33931return wrapper_CPU_round_out_decimals_out(self, decimals, out);
33932}
33933at::Tensor & round_(at::Tensor & self, int64_t decimals) {
33934return wrapper_CPU_round__decimals(self, decimals);
33935}
33936at::Tensor relu(const at::Tensor & self) {
33937return wrapper_CPU__relu(self);
33938}
33939at::Tensor & relu_(at::Tensor & self) {
33940return wrapper_CPU__relu_(self);
33941}
33942at::Tensor _prelu_kernel(const at::Tensor & self, const at::Tensor & weight) {
33943return wrapper_CPU___prelu_kernel(self, weight);
33944}
33945::std::tuple<at::Tensor,at::Tensor> _prelu_kernel_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight) {
33946return wrapper_CPU___prelu_kernel_backward(grad_output, self, weight);
33947}
33948at::Tensor gelu(const at::Tensor & self, c10::string_view approximate) {
33949return wrapper_CPU_gelu(self, approximate);
33950}
33951at::Tensor & gelu_out(at::Tensor & out, const at::Tensor & self, c10::string_view approximate) {
33952return wrapper_CPU_gelu_out_out(self, approximate, out);
33953}
33954at::Tensor & gelu_outf(const at::Tensor & self, c10::string_view approximate, at::Tensor & out) {
33955return wrapper_CPU_gelu_out_out(self, approximate, out);
33956}
33957at::Tensor & gelu_(at::Tensor & self, c10::string_view approximate) {
33958return wrapper_CPU_gelu_(self, approximate);
33959}
33960at::Tensor gelu_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate) {
33961return wrapper_CPU_gelu_backward(grad_output, self, approximate);
33962}
33963at::Tensor & gelu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate) {
33964return wrapper_CPU_gelu_backward_out_grad_input(grad_output, self, approximate, grad_input);
33965}
33966at::Tensor & gelu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input) {
33967return wrapper_CPU_gelu_backward_out_grad_input(grad_output, self, approximate, grad_input);
33968}
33969at::Tensor hardshrink(const at::Tensor & self, const at::Scalar & lambd) {
33970return wrapper_CPU_hardshrink(self, lambd);
33971}
33972at::Tensor & hardshrink_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & lambd) {
33973return wrapper_CPU_hardshrink_out_out(self, lambd, out);
33974}
33975at::Tensor & hardshrink_outf(const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out) {
33976return wrapper_CPU_hardshrink_out_out(self, lambd, out);
33977}
33978at::Tensor hardshrink_backward(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd) {
33979return wrapper_CPU_hardshrink_backward(grad_out, self, lambd);
33980}
33981at::Tensor & hardshrink_backward_out(at::Tensor & grad_input, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd) {
33982return wrapper_CPU_hardshrink_backward_out_grad_input(grad_out, self, lambd, grad_input);
33983}
33984at::Tensor & hardshrink_backward_outf(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input) {
33985return wrapper_CPU_hardshrink_backward_out_grad_input(grad_out, self, lambd, grad_input);
33986}
33987at::Tensor rsqrt(const at::Tensor & self) {
33988return wrapper_CPU_rsqrt(self);
33989}
33990at::Tensor & rsqrt_out(at::Tensor & out, const at::Tensor & self) {
33991return wrapper_CPU_rsqrt_out_out(self, out);
33992}
33993at::Tensor & rsqrt_outf(const at::Tensor & self, at::Tensor & out) {
33994return wrapper_CPU_rsqrt_out_out(self, out);
33995}
33996at::Tensor & rsqrt_(at::Tensor & self) {
33997return wrapper_CPU_rsqrt_(self);
33998}
33999at::Tensor silu(const at::Tensor & self) {
34000return wrapper_CPU_silu(self);
34001}
34002at::Tensor & silu_out(at::Tensor & out, const at::Tensor & self) {
34003return wrapper_CPU_silu_out_out(self, out);
34004}
34005at::Tensor & silu_outf(const at::Tensor & self, at::Tensor & out) {
34006return wrapper_CPU_silu_out_out(self, out);
34007}
34008at::Tensor & silu_(at::Tensor & self) {
34009return wrapper_CPU_silu_(self);
34010}
34011at::Tensor silu_backward(const at::Tensor & grad_output, const at::Tensor & self) {
34012return wrapper_CPU_silu_backward(grad_output, self);
34013}
34014at::Tensor & silu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self) {
34015return wrapper_CPU_silu_backward_out_grad_input(grad_output, self, grad_input);
34016}
34017at::Tensor & silu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) {
34018return wrapper_CPU_silu_backward_out_grad_input(grad_output, self, grad_input);
34019}
34020at::Tensor mish(const at::Tensor & self) {
34021return wrapper_CPU_mish(self);
34022}
34023at::Tensor & mish_out(at::Tensor & out, const at::Tensor & self) {
34024return wrapper_CPU_mish_out_out(self, out);
34025}
34026at::Tensor & mish_outf(const at::Tensor & self, at::Tensor & out) {
34027return wrapper_CPU_mish_out_out(self, out);
34028}
34029at::Tensor & mish_(at::Tensor & self) {
34030return wrapper_CPU_mish_(self);
34031}
34032at::Tensor mish_backward(const at::Tensor & grad_output, const at::Tensor & self) {
34033return wrapper_CPU__mish_backward(grad_output, self);
34034}
34035at::Tensor sigmoid(const at::Tensor & self) {
34036return wrapper_CPU_sigmoid(self);
34037}
34038at::Tensor & sigmoid_out(at::Tensor & out, const at::Tensor & self) {
34039return wrapper_CPU_sigmoid_out_out(self, out);
34040}
34041at::Tensor & sigmoid_outf(const at::Tensor & self, at::Tensor & out) {
34042return wrapper_CPU_sigmoid_out_out(self, out);
34043}
34044at::Tensor & sigmoid_(at::Tensor & self) {
34045return wrapper_CPU_sigmoid_(self);
34046}
34047at::Tensor logit(const at::Tensor & self, c10::optional<double> eps) {
34048return wrapper_CPU__logit(self, eps);
34049}
34050at::Tensor & logit_out(at::Tensor & out, const at::Tensor & self, c10::optional<double> eps) {
34051return wrapper_CPU_out_logit_out(self, eps, out);
34052}
34053at::Tensor & logit_outf(const at::Tensor & self, c10::optional<double> eps, at::Tensor & out) {
34054return wrapper_CPU_out_logit_out(self, eps, out);
34055}
34056at::Tensor & logit_(at::Tensor & self, c10::optional<double> eps) {
34057return wrapper_CPU__logit_(self, eps);
34058}
34059at::Tensor sin(const at::Tensor & self) {
34060return wrapper_CPU_sin(self);
34061}
34062at::Tensor & sin_out(at::Tensor & out, const at::Tensor & self) {
34063return wrapper_CPU_sin_out_out(self, out);
34064}
34065at::Tensor & sin_outf(const at::Tensor & self, at::Tensor & out) {
34066return wrapper_CPU_sin_out_out(self, out);
34067}
34068at::Tensor & sin_(at::Tensor & self) {
34069return wrapper_CPU_sin_(self);
34070}
34071at::Tensor sinc(const at::Tensor & self) {
34072return wrapper_CPU_sinc(self);
34073}
34074at::Tensor & sinc_out(at::Tensor & out, const at::Tensor & self) {
34075return wrapper_CPU_sinc_out_out(self, out);
34076}
34077at::Tensor & sinc_outf(const at::Tensor & self, at::Tensor & out) {
34078return wrapper_CPU_sinc_out_out(self, out);
34079}
34080at::Tensor & sinc_(at::Tensor & self) {
34081return wrapper_CPU_sinc_(self);
34082}
34083at::Tensor sinh(const at::Tensor & self) {
34084return wrapper_CPU_sinh(self);
34085}
34086at::Tensor & sinh_out(at::Tensor & out, const at::Tensor & self) {
34087return wrapper_CPU_sinh_out_out(self, out);
34088}
34089at::Tensor & sinh_outf(const at::Tensor & self, at::Tensor & out) {
34090return wrapper_CPU_sinh_out_out(self, out);
34091}
34092at::Tensor & sinh_(at::Tensor & self) {
34093return wrapper_CPU_sinh_(self);
34094}
34095at::Tensor _softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
34096return wrapper_CPU__softmax(self, dim, half_to_float);
34097}
34098at::Tensor & _softmax_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool half_to_float) {
34099return wrapper_CPU__softmax_out_out(self, dim, half_to_float, out);
34100}
34101at::Tensor & _softmax_outf(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
34102return wrapper_CPU__softmax_out_out(self, dim, half_to_float, out);
34103}
34104at::Tensor _softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
34105return wrapper_CPU__softmax_backward_data(grad_output, output, dim, input_dtype);
34106}
34107at::Tensor & _softmax_backward_data_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
34108return wrapper_CPU__softmax_backward_data_out_out(grad_output, output, dim, input_dtype, grad_input);
34109}
34110at::Tensor & _softmax_backward_data_outf(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & grad_input) {
34111return wrapper_CPU__softmax_backward_data_out_out(grad_output, output, dim, input_dtype, grad_input);
34112}
34113at::Tensor & sspaddmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
34114return wrapper_CPU_out_sspaddmm_out(self, mat1, mat2, beta, alpha, out);
34115}
34116at::Tensor & sspaddmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
34117return wrapper_CPU_out_sspaddmm_out(self, mat1, mat2, beta, alpha, out);
34118}
34119at::Tensor _stack(at::TensorList tensors, int64_t dim) {
34120return wrapper_CPU___stack(tensors, dim);
34121}
34122at::Tensor & _stack_out(at::Tensor & out, at::TensorList tensors, int64_t dim) {
34123return wrapper_CPU_out__stack_out(tensors, dim, out);
34124}
34125at::Tensor & _stack_outf(at::TensorList tensors, int64_t dim, at::Tensor & out) {
34126return wrapper_CPU_out__stack_out(tensors, dim, out);
34127}
34128at::Tensor sum(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
34129return wrapper_CPU_sum_dim_IntList(self, dim, keepdim, dtype);
34130}
34131at::Tensor & sum_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
34132return wrapper_CPU_sum_out_IntList_out(self, dim, keepdim, dtype, out);
34133}
34134at::Tensor & sum_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
34135return wrapper_CPU_sum_out_IntList_out(self, dim, keepdim, dtype, out);
34136}
34137at::Tensor nansum(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
34138return wrapper_CPU__nansum(self, dim, keepdim, dtype);
34139}
34140at::Tensor & nansum_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
34141return wrapper_CPU_out_nansum_out(self, dim, keepdim, dtype, out);
34142}
34143at::Tensor & nansum_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
34144return wrapper_CPU_out_nansum_out(self, dim, keepdim, dtype, out);
34145}
34146at::Tensor sqrt(const at::Tensor & self) {
34147return wrapper_CPU_sqrt(self);
34148}
34149at::Tensor & sqrt_out(at::Tensor & out, const at::Tensor & self) {
34150return wrapper_CPU_sqrt_out_out(self, out);
34151}
34152at::Tensor & sqrt_outf(const at::Tensor & self, at::Tensor & out) {
34153return wrapper_CPU_sqrt_out_out(self, out);
34154}
34155at::Tensor & sqrt_(at::Tensor & self) {
34156return wrapper_CPU_sqrt_(self);
34157}
34158at::Tensor std(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
34159return wrapper_CPU_correction_std(self, dim, correction, keepdim);
34160}
34161at::Tensor & std_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
34162return wrapper_CPU_correction_out_std_out(self, dim, correction, keepdim, out);
34163}
34164at::Tensor & std_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) {
34165return wrapper_CPU_correction_out_std_out(self, dim, correction, keepdim, out);
34166}
34167::std::tuple<at::Tensor,at::Tensor> std_mean(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
34168return wrapper_CPU_correction_std_mean(self, dim, correction, keepdim);
34169}
34170at::Tensor prod(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
34171return wrapper_CPU__prod(self, dtype);
34172}
34173at::Tensor prod(const at::Tensor & self, int64_t dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
34174return wrapper_CPU_prod_dim_int(self, dim, keepdim, dtype);
34175}
34176at::Tensor & prod_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
34177return wrapper_CPU_prod_out_int_out(self, dim, keepdim, dtype, out);
34178}
34179at::Tensor & prod_outf(const at::Tensor & self, int64_t dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
34180return wrapper_CPU_prod_out_int_out(self, dim, keepdim, dtype, out);
34181}
34182at::Tensor tan(const at::Tensor & self) {
34183return wrapper_CPU_tan(self);
34184}
34185at::Tensor & tan_out(at::Tensor & out, const at::Tensor & self) {
34186return wrapper_CPU_tan_out_out(self, out);
34187}
34188at::Tensor & tan_outf(const at::Tensor & self, at::Tensor & out) {
34189return wrapper_CPU_tan_out_out(self, out);
34190}
34191at::Tensor & tan_(at::Tensor & self) {
34192return wrapper_CPU_tan_(self);
34193}
34194at::Tensor tanh(const at::Tensor & self) {
34195return wrapper_CPU_tanh(self);
34196}
34197at::Tensor & tanh_out(at::Tensor & out, const at::Tensor & self) {
34198return wrapper_CPU_tanh_out_out(self, out);
34199}
34200at::Tensor & tanh_outf(const at::Tensor & self, at::Tensor & out) {
34201return wrapper_CPU_tanh_out_out(self, out);
34202}
34203at::Tensor & tanh_(at::Tensor & self) {
34204return wrapper_CPU_tanh_(self);
34205}
34206at::Tensor & tensordot_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other) {
34207return wrapper_CPU_out_tensordot_out(self, other, dims_self, dims_other, out);
34208}
34209at::Tensor & tensordot_outf(const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other, at::Tensor & out) {
34210return wrapper_CPU_out_tensordot_out(self, other, dims_self, dims_other, out);
34211}
34212at::Tensor threshold(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
34213return wrapper_CPU_threshold(self, threshold, value);
34214}
34215at::Tensor & threshold_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
34216return wrapper_CPU_threshold_out_out(self, threshold, value, out);
34217}
34218at::Tensor & threshold_outf(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value, at::Tensor & out) {
34219return wrapper_CPU_threshold_out_out(self, threshold, value, out);
34220}
34221at::Tensor & threshold_(at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
34222return wrapper_CPU_threshold_(self, threshold, value);
34223}
34224at::Tensor threshold_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
34225return wrapper_CPU_threshold_backward(grad_output, self, threshold);
34226}
34227at::Tensor & threshold_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
34228return wrapper_CPU_threshold_backward_out_grad_input(grad_output, self, threshold, grad_input);
34229}
34230at::Tensor & threshold_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input) {
34231return wrapper_CPU_threshold_backward_out_grad_input(grad_output, self, threshold, grad_input);
34232}
34233at::Tensor flip(const at::Tensor & self, at::IntArrayRef dims) {
34234return wrapper_CPU__flip(self, dims);
34235}
34236at::Tensor roll(const at::Tensor & self, at::IntArrayRef shifts, at::IntArrayRef dims) {
34237return wrapper_CPU__roll(self, shifts, dims);
34238}
34239::std::tuple<at::Tensor,at::Tensor,at::Tensor> _transform_bias_rescale_qkv(const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads) {
34240return wrapper_CPU___transform_bias_rescale_qkv(qkv, qkv_bias, num_heads);
34241}
34242at::Tensor _nested_tensor_from_mask(const at::Tensor & t, const at::Tensor & mask, bool mask_check) {
34243return wrapper_CPU___nested_tensor_from_mask(t, mask, mask_check);
34244}
34245bool _nested_tensor_from_mask_left_aligned(const at::Tensor & t, const at::Tensor & mask) {
34246return wrapper_CPU___nested_tensor_from_mask_left_aligned(t, mask);
34247}
34248at::Tensor _nested_from_padded(const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213) {
34249return wrapper_CPU___nested_from_padded(padded, cpu_nested_shape_example, fuse_transform_0213);
34250}
34251at::Tensor _nested_view_from_buffer(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, at::IntArrayRef offsets) {
34252return wrapper_CPU___nested_view_from_buffer(self, nested_size, nested_strides, offsets);
34253}
34254at::Tensor trunc(const at::Tensor & self) {
34255return wrapper_CPU_trunc(self);
34256}
34257at::Tensor & trunc_out(at::Tensor & out, const at::Tensor & self) {
34258return wrapper_CPU_trunc_out_out(self, out);
34259}
34260at::Tensor & trunc_outf(const at::Tensor & self, at::Tensor & out) {
34261return wrapper_CPU_trunc_out_out(self, out);
34262}
34263at::Tensor & trunc_(at::Tensor & self) {
34264return wrapper_CPU_trunc_(self);
34265}
34266::std::tuple<at::Tensor,at::Tensor> _unique(const at::Tensor & self, bool sorted, bool return_inverse) {
34267return wrapper_CPU___unique(self, sorted, return_inverse);
34268}
34269::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_dim(const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts) {
34270return wrapper_CPU__unique_dim(self, dim, sorted, return_inverse, return_counts);
34271}
34272::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_consecutive(const at::Tensor & self, bool return_inverse, bool return_counts, c10::optional<int64_t> dim) {
34273return wrapper_CPU__unique_consecutive(self, return_inverse, return_counts, dim);
34274}
34275::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_dim_consecutive(const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts) {
34276return wrapper_CPU__unique_dim_consecutive(self, dim, return_inverse, return_counts);
34277}
34278::std::tuple<at::Tensor,at::Tensor,at::Tensor> _unique2(const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts) {
34279return wrapper_CPU___unique2(self, sorted, return_inverse, return_counts);
34280}
34281at::Tensor var(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
34282return wrapper_CPU_correction_var(self, dim, correction, keepdim);
34283}
34284at::Tensor & var_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
34285return wrapper_CPU_correction_out_var_out(self, dim, correction, keepdim, out);
34286}
34287at::Tensor & var_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) {
34288return wrapper_CPU_correction_out_var_out(self, dim, correction, keepdim, out);
34289}
34290::std::tuple<at::Tensor,at::Tensor> var_mean(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
34291return wrapper_CPU_correction_var_mean(self, dim, correction, keepdim);
34292}
34293at::Tensor where(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other) {
34294return wrapper_CPU_self_where(condition, self, other);
34295}
34296at::Tensor & where_out(at::Tensor & out, const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other) {
34297return wrapper_CPU_self_out_where_out(condition, self, other, out);
34298}
34299at::Tensor & where_outf(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
34300return wrapper_CPU_self_out_where_out(condition, self, other, out);
34301}
34302::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface(const at::Tensor & v, const at::Tensor & g, int64_t dim) {
34303return wrapper_CPU___weight_norm_interface(v, g, dim);
34304}
34305::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface_backward(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
34306return wrapper_CPU___weight_norm_interface_backward(grad_w, saved_v, saved_g, saved_norms, dim);
34307}
34308at::Tensor _efficientzerotensor(at::IntArrayRef size, at::TensorOptions options) {
34309return wrapper_CPU___efficientzerotensor(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
34310}
34311at::Tensor _efficientzerotensor(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
34312return wrapper_CPU___efficientzerotensor(size, dtype, layout, device, pin_memory);
34313}
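// NOTE: factory-style operators are emitted with two overloads: one taking a packed
// at::TensorOptions and one taking unpacked optional dtype/layout/device/pin_memory
// arguments. The TensorOptions overload unpacks itself (optTypeMetaToScalarType plus
// the *_opt() accessors) and forwards to the same wrapper, as _efficientzerotensor
// does above and tril_indices/triu_indices do further down. Illustrative call through
// the public API (the argument values are arbitrary):
//   at::Tensor idx = at::tril_indices(3, 3, /*offset=*/0,
//                                     at::TensorOptions().dtype(at::kLong));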
34314at::Tensor _standard_gamma_grad(const at::Tensor & self, const at::Tensor & output) {
34315return wrapper_CPU___standard_gamma_grad(self, output);
34316}
34317at::Tensor _standard_gamma(const at::Tensor & self, c10::optional<at::Generator> generator) {
34318return wrapper_CPU___standard_gamma(self, generator);
34319}
34320at::Tensor _dirichlet_grad(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total) {
34321return wrapper_CPU___dirichlet_grad(x, alpha, total);
34322}
34323at::Tensor _sample_dirichlet(const at::Tensor & self, c10::optional<at::Generator> generator) {
34324return wrapper_CPU___sample_dirichlet(self, generator);
34325}
34326at::Tensor poisson(const at::Tensor & self, c10::optional<at::Generator> generator) {
34327return wrapper_CPU__poisson(self, generator);
34328}
34329at::Tensor binomial(const at::Tensor & count, const at::Tensor & prob, c10::optional<at::Generator> generator) {
34330return wrapper_CPU__binomial(count, prob, generator);
34331}
34332at::Tensor _spdiags(const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, c10::optional<at::Layout> layout) {
34333return wrapper_CPU___spdiags(diagonals, offsets, shape, layout);
34334}
34335at::Tensor norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) {
34336return wrapper_CPU_norm_ScalarOpt_dim_dtype(self, p, dim, keepdim, dtype);
34337}
34338at::Tensor & norm_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) {
34339return wrapper_CPU_norm_out_dtype_out(self, p, dim, keepdim, dtype, out);
34340}
34341at::Tensor & norm_outf(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype, at::Tensor & out) {
34342return wrapper_CPU_norm_out_dtype_out(self, p, dim, keepdim, dtype, out);
34343}
34344at::Tensor norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim) {
34345return wrapper_CPU_norm_ScalarOpt_dim(self, p, dim, keepdim);
34346}
34347at::Tensor & norm_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim) {
34348return wrapper_CPU_norm_out_out(self, p, dim, keepdim, out);
34349}
34350at::Tensor & norm_outf(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
34351return wrapper_CPU_norm_out_out(self, p, dim, keepdim, out);
34352}
34353::std::tuple<at::Tensor &,at::Tensor &> frexp_out(at::Tensor & mantissa, at::Tensor & exponent, const at::Tensor & self) {
34354return wrapper_CPU_Tensor_out_frexp_out(self, mantissa, exponent);
34355}
34356::std::tuple<at::Tensor &,at::Tensor &> frexp_outf(const at::Tensor & self, at::Tensor & mantissa, at::Tensor & exponent) {
34357return wrapper_CPU_Tensor_out_frexp_out(self, mantissa, exponent);
34358}
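// NOTE: operators with more than one output return a ::std::tuple of Tensor references
// from their out variants, as frexp_out/frexp_outf do above: mantissa and exponent are
// written in place and handed back by reference. The functional form allocates both
// outputs, e.g. (tensor name is hypothetical):
//   auto [mantissa, exponent] = at::frexp(self);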
34359at::Tensor & zero_(at::Tensor & self) {
34360return wrapper_CPU__zero_(self);
34361}
34362at::Tensor sub(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
34363return wrapper_CPU_sub_Tensor(self, other, alpha);
34364}
34365at::Tensor & sub_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
34366return wrapper_CPU_sub_out_out(self, other, alpha, out);
34367}
34368at::Tensor & sub_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
34369return wrapper_CPU_sub_out_out(self, other, alpha, out);
34370}
34371at::Tensor & sub_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
34372return wrapper_CPU_sub__Tensor(self, other, alpha);
34373}
34374at::Tensor rsub(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
34375return wrapper_CPU_Tensor_rsub(self, other, alpha);
34376}
34377at::Tensor heaviside(const at::Tensor & self, const at::Tensor & values) {
34378return wrapper_CPU_heaviside(self, values);
34379}
34380at::Tensor & heaviside_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & values) {
34381return wrapper_CPU_heaviside_out_out(self, values, out);
34382}
34383at::Tensor & heaviside_outf(const at::Tensor & self, const at::Tensor & values, at::Tensor & out) {
34384return wrapper_CPU_heaviside_out_out(self, values, out);
34385}
34386at::Tensor & heaviside_(at::Tensor & self, const at::Tensor & values) {
34387return wrapper_CPU_heaviside_(self, values);
34388}
34389at::Tensor addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
34390return wrapper_CPU_addmm(self, mat1, mat2, beta, alpha);
34391}
34392at::Tensor & addmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
34393return wrapper_CPU_addmm_out_out(self, mat1, mat2, beta, alpha, out);
34394}
34395at::Tensor & addmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
34396return wrapper_CPU_addmm_out_out(self, mat1, mat2, beta, alpha, out);
34397}
34398at::Tensor & addmm_(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
34399return wrapper_CPU_addmm_(self, mat1, mat2, beta, alpha);
34400}
34401at::Tensor _addmm_activation(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu) {
34402return wrapper_CPU__addmm_activation(self, mat1, mat2, beta, alpha, use_gelu);
34403}
34404at::Tensor & _addmm_activation_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu) {
34405return wrapper_CPU__addmm_activation_out_out(self, mat1, mat2, beta, alpha, use_gelu, out);
34406}
34407at::Tensor & _addmm_activation_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu, at::Tensor & out) {
34408return wrapper_CPU__addmm_activation_out_out(self, mat1, mat2, beta, alpha, use_gelu, out);
34409}
34410int64_t sparse_dim(const at::Tensor & self) {
34411return wrapper_CPU__sparse_dim(self);
34412}
34413int64_t dense_dim(const at::Tensor & self) {
34414return wrapper_CPU__dense_dim(self);
34415}
34416at::Tensor to_sparse(const at::Tensor & self, int64_t sparse_dim) {
34417return wrapper_CPU_sparse_dim_to_sparse(self, sparse_dim);
34418}
34419at::Tensor to_sparse(const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
34420return wrapper_CPU__to_sparse(self, layout, blocksize, dense_dim);
34421}
34422at::Tensor to_sparse_csr(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
34423return wrapper_CPU__to_sparse_csr(self, dense_dim);
34424}
34425at::Tensor to_sparse_csc(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
34426return wrapper_CPU__to_sparse_csc(self, dense_dim);
34427}
34428at::Tensor to_sparse_bsr(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
34429return wrapper_CPU__to_sparse_bsr(self, blocksize, dense_dim);
34430}
34431at::Tensor to_sparse_bsc(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
34432return wrapper_CPU__to_sparse_bsc(self, blocksize, dense_dim);
34433}
34434at::Tensor to_mkldnn(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
34435return wrapper_CPU__to_mkldnn(self, dtype);
34436}
34437at::Tensor quantize_per_tensor_dynamic(const at::Tensor & self, at::ScalarType dtype, bool reduce_range) {
34438return wrapper_CPU__quantize_per_tensor_dynamic(self, dtype, reduce_range);
34439}
34440at::Tensor quantize_per_tensor(const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype) {
34441return wrapper_CPU__quantize_per_tensor(self, scale, zero_point, dtype);
34442}
34443at::Tensor quantize_per_tensor(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype) {
34444return wrapper_CPU_tensor_qparams_quantize_per_tensor(self, scale, zero_point, dtype);
34445}
34446::std::vector<at::Tensor> quantize_per_tensor(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype) {
34447return wrapper_CPU_tensors_quantize_per_tensor(tensors, scales, zero_points, dtype);
34448}
34449at::Tensor quantize_per_channel(const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype) {
34450return wrapper_CPU__quantize_per_channel(self, scales, zero_points, axis, dtype);
34451}
34452at::Tensor dequantize(const at::Tensor & self) {
34453return wrapper_CPU_self_dequantize(self);
34454}
34455at::Tensor _make_per_tensor_quantized_tensor(const at::Tensor & self, double scale, int64_t zero_point) {
34456return wrapper_CPU___make_per_tensor_quantized_tensor(self, scale, zero_point);
34457}
34458at::Tensor _make_per_channel_quantized_tensor(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis) {
34459return wrapper_CPU___make_per_channel_quantized_tensor(self, scale, zero_point, axis);
34460}
34461::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_tensor_affine_cachemask(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
34462return wrapper_CPU__fake_quantize_per_tensor_affine_cachemask(self, scale, zero_point, quant_min, quant_max);
34463}
34464::std::tuple<at::Tensor,at::Tensor> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max) {
34465return wrapper_CPU___fake_quantize_per_tensor_affine_cachemask_tensor_qparams(self, scale, zero_point, fake_quant_enabled, quant_min, quant_max);
34466}
34467at::Tensor _fake_quantize_learnable_per_tensor_affine(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) {
34468return wrapper_CPU___fake_quantize_learnable_per_tensor_affine(self, scale, zero_point, quant_min, quant_max, grad_factor);
34469}
34470::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_tensor_affine_backward(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) {
34471return wrapper_CPU___fake_quantize_learnable_per_tensor_affine_backward(grad, self, scale, zero_point, quant_min, quant_max, grad_factor);
34472}
34473::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_channel_affine_cachemask(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
34474return wrapper_CPU__fake_quantize_per_channel_affine_cachemask(self, scale, zero_point, axis, quant_min, quant_max);
34475}
34476at::Tensor _fake_quantize_learnable_per_channel_affine(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) {
34477return wrapper_CPU___fake_quantize_learnable_per_channel_affine(self, scale, zero_point, axis, quant_min, quant_max, grad_factor);
34478}
34479::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_channel_affine_backward(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) {
34480return wrapper_CPU___fake_quantize_learnable_per_channel_affine_backward(grad, self, scale, zero_point, axis, quant_min, quant_max, grad_factor);
34481}
34482::std::tuple<at::Tensor,at::Tensor> _fused_moving_avg_obs_fq_helper(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) {
34483return wrapper_CPU___fused_moving_avg_obs_fq_helper(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
34484}
34485at::Scalar _local_scalar_dense(const at::Tensor & self) {
34486return wrapper_CPU___local_scalar_dense(self);
34487}
34488at::Tensor & set_(at::Tensor & self, at::Storage source) {
34489return wrapper_CPU_source_Storage_set_(self, source);
34490}
34491at::Tensor & set_(at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride) {
34492return wrapper_CPU_source_Storage_storage_offset_set_(self, source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
34493}
34494at::Tensor & set__symint(at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
34495return wrapper_CPU_source_Storage_storage_offset_set_(self, source, storage_offset, size, stride);
34496}
34497at::Tensor & set_(at::Tensor & self, const at::Tensor & source) {
34498return wrapper_CPU_source_Tensor_set_(self, source);
34499}
34500at::Tensor & set_(at::Tensor & self) {
34501return wrapper_CPU__set_(self);
34502}
34503bool is_set_to(const at::Tensor & self, const at::Tensor & tensor) {
34504return wrapper_CPU__is_set_to(self, tensor);
34505}
34506at::Tensor & masked_fill_(at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
34507return wrapper_CPU_Scalar_masked_fill_(self, mask, value);
34508}
34509at::Tensor & masked_fill_(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
34510return wrapper_CPU_Tensor_masked_fill_(self, mask, value);
34511}
34512at::Tensor & masked_scatter_(at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) {
34513return wrapper_CPU__masked_scatter_(self, mask, source);
34514}
34515at::Tensor _masked_softmax(const at::Tensor & self, const at::Tensor & mask, c10::optional<int64_t> dim, c10::optional<int64_t> mask_type) {
34516return wrapper_CPU___masked_softmax(self, mask, dim, mask_type);
34517}
34518at::Tensor _masked_softmax_backward(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim) {
34519return wrapper_CPU___masked_softmax_backward(grad_output, output, mask, dim);
34520}
34521at::Tensor view(const at::Tensor & self, at::IntArrayRef size) {
34522return wrapper_CPU__view(self, c10::fromIntArrayRefSlow(size));
34523}
34524at::Tensor view_symint(const at::Tensor & self, c10::SymIntArrayRef size) {
34525return wrapper_CPU__view(self, size);
34526}
34527at::Tensor & put_(at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) {
34528return wrapper_CPU__put_(self, index, source, accumulate);
34529}
34530at::Tensor index_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
34531return wrapper_CPU_index_add(self, dim, index, source, alpha);
34532}
34533at::Tensor & index_add_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
34534return wrapper_CPU_index_add_out_out(self, dim, index, source, alpha, out);
34535}
34536at::Tensor & index_add_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, at::Tensor & out) {
34537return wrapper_CPU_index_add_out_out(self, dim, index, source, alpha, out);
34538}
34539at::Tensor & index_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
34540return wrapper_CPU_index_add_(self, dim, index, source, alpha);
34541}
34542at::Tensor index_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) {
34543return wrapper_CPU_index_reduce(self, dim, index, source, reduce, include_self);
34544}
34545at::Tensor & index_reduce_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) {
34546return wrapper_CPU_index_reduce_out_out(self, dim, index, source, reduce, include_self, out);
34547}
34548at::Tensor & index_reduce_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, at::Tensor & out) {
34549return wrapper_CPU_index_reduce_out_out(self, dim, index, source, reduce, include_self, out);
34550}
34551at::Tensor & index_reduce_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) {
34552return wrapper_CPU_index_reduce_(self, dim, index, source, reduce, include_self);
34553}
34554at::Tensor & index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
34555return wrapper_CPU_int_Scalar_index_fill_(self, dim, index, value);
34556}
34557at::Tensor & index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) {
34558return wrapper_CPU_int_Tensor_index_fill_(self, dim, index, value);
34559}
34560at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
34561return wrapper_CPU_scatter_src(self, dim, index, src);
34562}
34563at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
34564return wrapper_CPU_scatter_out_src_out(self, dim, index, src, out);
34565}
34566at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out) {
34567return wrapper_CPU_scatter_out_src_out(self, dim, index, src, out);
34568}
34569at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
34570return wrapper_CPU_scatter__src(self, dim, index, src);
34571}
34572at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
34573return wrapper_CPU_scatter_value(self, dim, index, value);
34574}
34575at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
34576return wrapper_CPU_scatter_out_value_out(self, dim, index, value, out);
34577}
34578at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out) {
34579return wrapper_CPU_scatter_out_value_out(self, dim, index, value, out);
34580}
34581at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
34582return wrapper_CPU_scatter__value(self, dim, index, value);
34583}
34584at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
34585return wrapper_CPU_scatter_reduce(self, dim, index, src, reduce);
34586}
34587at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
34588return wrapper_CPU_scatter_out_reduce_out(self, dim, index, src, reduce, out);
34589}
34590at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, at::Tensor & out) {
34591return wrapper_CPU_scatter_out_reduce_out(self, dim, index, src, reduce, out);
34592}
34593at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
34594return wrapper_CPU_scatter__reduce(self, dim, index, src, reduce);
34595}
34596at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
34597return wrapper_CPU_scatter_value_reduce(self, dim, index, value, reduce);
34598}
34599at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
34600return wrapper_CPU_scatter_out_value_reduce_out(self, dim, index, value, reduce, out);
34601}
34602at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce, at::Tensor & out) {
34603return wrapper_CPU_scatter_out_value_reduce_out(self, dim, index, value, reduce, out);
34604}
34605at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
34606return wrapper_CPU_scatter__value_reduce(self, dim, index, value, reduce);
34607}
34608at::Tensor scatter_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
34609return wrapper_CPU_scatter_add(self, dim, index, src);
34610}
34611at::Tensor & scatter_add_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
34612return wrapper_CPU_scatter_add_out_out(self, dim, index, src, out);
34613}
34614at::Tensor & scatter_add_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out) {
34615return wrapper_CPU_scatter_add_out_out(self, dim, index, src, out);
34616}
34617at::Tensor & scatter_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
34618return wrapper_CPU_scatter_add_(self, dim, index, src);
34619}
34620at::Tensor scatter_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
34621return wrapper_CPU_scatter_reduce_two(self, dim, index, src, reduce, include_self);
34622}
34623at::Tensor & scatter_reduce_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
34624return wrapper_CPU_scatter_reduce_out_two_out(self, dim, index, src, reduce, include_self, out);
34625}
34626at::Tensor & scatter_reduce_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self, at::Tensor & out) {
34627return wrapper_CPU_scatter_reduce_out_two_out(self, dim, index, src, reduce, include_self, out);
34628}
34629at::Tensor & scatter_reduce_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
34630return wrapper_CPU_scatter_reduce__two(self, dim, index, src, reduce, include_self);
34631}
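// NOTE: the scatter family above covers four schema overloads (Tensor src, Scalar value,
// and the reduce-string variant of each) plus scatter_add and scatter_reduce, which adds
// an include_self flag. Each overload forwards to its own wrapper_CPU_scatter* entry
// point. A minimal illustrative call (tensor names are hypothetical):
//   at::Tensor result = at::scatter(self, /*dim=*/0, index, src, "add");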
34632at::Tensor eq(const at::Tensor & self, const at::Scalar & other) {
34633return wrapper_CPU_eq_Scalar(self, other);
34634}
34635at::Tensor & eq_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
34636return wrapper_CPU_eq_out_Scalar_out(self, other, out);
34637}
34638at::Tensor & eq_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
34639return wrapper_CPU_eq_out_Scalar_out(self, other, out);
34640}
34641at::Tensor & eq_(at::Tensor & self, const at::Scalar & other) {
34642return wrapper_CPU_eq__Scalar(self, other);
34643}
34644at::Tensor eq(const at::Tensor & self, const at::Tensor & other) {
34645return wrapper_CPU_eq_Tensor(self, other);
34646}
34647at::Tensor & eq_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
34648return wrapper_CPU_eq_out_Tensor_out(self, other, out);
34649}
34650at::Tensor & eq_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
34651return wrapper_CPU_eq_out_Tensor_out(self, other, out);
34652}
34653at::Tensor & eq_(at::Tensor & self, const at::Tensor & other) {
34654return wrapper_CPU_eq__Tensor(self, other);
34655}
34656at::Tensor bitwise_and(const at::Tensor & self, const at::Tensor & other) {
34657return wrapper_CPU_bitwise_and_Tensor(self, other);
34658}
34659at::Tensor & bitwise_and_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
34660return wrapper_CPU_bitwise_and_out_Tensor_out(self, other, out);
34661}
34662at::Tensor & bitwise_and_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
34663return wrapper_CPU_bitwise_and_out_Tensor_out(self, other, out);
34664}
34665at::Tensor & bitwise_and_(at::Tensor & self, const at::Tensor & other) {
34666return wrapper_CPU_bitwise_and__Tensor(self, other);
34667}
34668at::Tensor bitwise_or(const at::Tensor & self, const at::Tensor & other) {
34669return wrapper_CPU_bitwise_or_Tensor(self, other);
34670}
34671at::Tensor & bitwise_or_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
34672return wrapper_CPU_bitwise_or_out_Tensor_out(self, other, out);
34673}
34674at::Tensor & bitwise_or_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
34675return wrapper_CPU_bitwise_or_out_Tensor_out(self, other, out);
34676}
34677at::Tensor & bitwise_or_(at::Tensor & self, const at::Tensor & other) {
34678return wrapper_CPU_bitwise_or__Tensor(self, other);
34679}
34680at::Tensor bitwise_xor(const at::Tensor & self, const at::Tensor & other) {
34681return wrapper_CPU_bitwise_xor_Tensor(self, other);
34682}
34683at::Tensor & bitwise_xor_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
34684return wrapper_CPU_bitwise_xor_out_Tensor_out(self, other, out);
34685}
34686at::Tensor & bitwise_xor_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
34687return wrapper_CPU_bitwise_xor_out_Tensor_out(self, other, out);
34688}
34689at::Tensor & bitwise_xor_(at::Tensor & self, const at::Tensor & other) {
34690return wrapper_CPU_bitwise_xor__Tensor(self, other);
34691}
34692at::Tensor __lshift__(const at::Tensor & self, const at::Scalar & other) {
34693return wrapper_CPU_Scalar___lshift__(self, other);
34694}
34695at::Tensor & __ilshift__(at::Tensor & self, const at::Scalar & other) {
34696return wrapper_CPU_Scalar___ilshift__(self, other);
34697}
34698at::Tensor __lshift__(const at::Tensor & self, const at::Tensor & other) {
34699return wrapper_CPU_Tensor___lshift__(self, other);
34700}
34701at::Tensor & __ilshift__(at::Tensor & self, const at::Tensor & other) {
34702return wrapper_CPU_Tensor___ilshift__(self, other);
34703}
34704at::Tensor bitwise_left_shift(const at::Tensor & self, const at::Tensor & other) {
34705return wrapper_CPU_bitwise_left_shift_Tensor(self, other);
34706}
34707at::Tensor & bitwise_left_shift_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
34708return wrapper_CPU_bitwise_left_shift_out_Tensor_out(self, other, out);
34709}
34710at::Tensor & bitwise_left_shift_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
34711return wrapper_CPU_bitwise_left_shift_out_Tensor_out(self, other, out);
34712}
34713at::Tensor & bitwise_left_shift_(at::Tensor & self, const at::Tensor & other) {
34714return wrapper_CPU_bitwise_left_shift__Tensor(self, other);
34715}
34716at::Tensor __rshift__(const at::Tensor & self, const at::Scalar & other) {
34717return wrapper_CPU_Scalar___rshift__(self, other);
34718}
34719at::Tensor & __irshift__(at::Tensor & self, const at::Scalar & other) {
34720return wrapper_CPU_Scalar___irshift__(self, other);
34721}
34722at::Tensor __rshift__(const at::Tensor & self, const at::Tensor & other) {
34723return wrapper_CPU_Tensor___rshift__(self, other);
34724}
34725at::Tensor & __irshift__(at::Tensor & self, const at::Tensor & other) {
34726return wrapper_CPU_Tensor___irshift__(self, other);
34727}
34728at::Tensor bitwise_right_shift(const at::Tensor & self, const at::Tensor & other) {
34729return wrapper_CPU_bitwise_right_shift_Tensor(self, other);
34730}
34731at::Tensor & bitwise_right_shift_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
34732return wrapper_CPU_bitwise_right_shift_out_Tensor_out(self, other, out);
34733}
34734at::Tensor & bitwise_right_shift_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
34735return wrapper_CPU_bitwise_right_shift_out_Tensor_out(self, other, out);
34736}
34737at::Tensor & bitwise_right_shift_(at::Tensor & self, const at::Tensor & other) {
34738return wrapper_CPU_bitwise_right_shift__Tensor(self, other);
34739}
34740at::Tensor tril(const at::Tensor & self, int64_t diagonal) {
34741return wrapper_CPU_tril(self, diagonal);
34742}
34743at::Tensor & tril_out(at::Tensor & out, const at::Tensor & self, int64_t diagonal) {
34744return wrapper_CPU_tril_out_out(self, diagonal, out);
34745}
34746at::Tensor & tril_outf(const at::Tensor & self, int64_t diagonal, at::Tensor & out) {
34747return wrapper_CPU_tril_out_out(self, diagonal, out);
34748}
34749at::Tensor & tril_(at::Tensor & self, int64_t diagonal) {
34750return wrapper_CPU_tril_(self, diagonal);
34751}
34752at::Tensor triu(const at::Tensor & self, int64_t diagonal) {
34753return wrapper_CPU_triu(self, diagonal);
34754}
34755at::Tensor & triu_out(at::Tensor & out, const at::Tensor & self, int64_t diagonal) {
34756return wrapper_CPU_triu_out_out(self, diagonal, out);
34757}
34758at::Tensor & triu_outf(const at::Tensor & self, int64_t diagonal, at::Tensor & out) {
34759return wrapper_CPU_triu_out_out(self, diagonal, out);
34760}
34761at::Tensor & triu_(at::Tensor & self, int64_t diagonal) {
34762return wrapper_CPU_triu_(self, diagonal);
34763}
34764at::Tensor digamma(const at::Tensor & self) {
34765return wrapper_CPU_digamma(self);
34766}
34767at::Tensor & digamma_out(at::Tensor & out, const at::Tensor & self) {
34768return wrapper_CPU_digamma_out_out(self, out);
34769}
34770at::Tensor & digamma_outf(const at::Tensor & self, at::Tensor & out) {
34771return wrapper_CPU_digamma_out_out(self, out);
34772}
34773at::Tensor & digamma_(at::Tensor & self) {
34774return wrapper_CPU_digamma_(self);
34775}
34776at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
34777return wrapper_CPU_lerp_Scalar(self, end, weight);
34778}
34779at::Tensor & lerp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
34780return wrapper_CPU_lerp_out_Scalar_out(self, end, weight, out);
34781}
34782at::Tensor & lerp_outf(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out) {
34783return wrapper_CPU_lerp_out_Scalar_out(self, end, weight, out);
34784}
34785at::Tensor & lerp_(at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
34786return wrapper_CPU_lerp__Scalar(self, end, weight);
34787}
34788at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
34789return wrapper_CPU_lerp_Tensor(self, end, weight);
34790}
34791at::Tensor & lerp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
34792return wrapper_CPU_lerp_out_Tensor_out(self, end, weight, out);
34793}
34794at::Tensor & lerp_outf(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out) {
34795return wrapper_CPU_lerp_out_Tensor_out(self, end, weight, out);
34796}
34797at::Tensor & lerp_(at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
34798return wrapper_CPU_lerp__Tensor(self, end, weight);
34799}
34800at::Tensor addbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
34801return wrapper_CPU__addbmm(self, batch1, batch2, beta, alpha);
34802}
34803at::Tensor & addbmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
34804return wrapper_CPU_out_addbmm_out(self, batch1, batch2, beta, alpha, out);
34805}
34806at::Tensor & addbmm_outf(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
34807return wrapper_CPU_out_addbmm_out(self, batch1, batch2, beta, alpha, out);
34808}
34809at::Tensor & addbmm_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
34810return wrapper_CPU__addbmm_(self, batch1, batch2, beta, alpha);
34811}
34812at::Tensor & random_(at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator) {
34813return wrapper_CPU_from_random_(self, from, to, generator);
34814}
34815at::Tensor & random_(at::Tensor & self, int64_t to, c10::optional<at::Generator> generator) {
34816return wrapper_CPU_to_random_(self, to, generator);
34817}
34818at::Tensor & random_(at::Tensor & self, c10::optional<at::Generator> generator) {
34819return wrapper_CPU__random_(self, generator);
34820}
34821at::Tensor & uniform_(at::Tensor & self, double from, double to, c10::optional<at::Generator> generator) {
34822return wrapper_CPU__uniform_(self, from, to, generator);
34823}
34824at::Tensor & cauchy_(at::Tensor & self, double median, double sigma, c10::optional<at::Generator> generator) {
34825return wrapper_CPU__cauchy_(self, median, sigma, generator);
34826}
34827at::Tensor & log_normal_(at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
34828return wrapper_CPU__log_normal_(self, mean, std, generator);
34829}
34830at::Tensor & exponential_(at::Tensor & self, double lambd, c10::optional<at::Generator> generator) {
34831return wrapper_CPU__exponential_(self, lambd, generator);
34832}
34833at::Tensor & geometric_(at::Tensor & self, double p, c10::optional<at::Generator> generator) {
34834return wrapper_CPU__geometric_(self, p, generator);
34835}
34836at::Tensor tril_indices(int64_t row, int64_t col, int64_t offset, at::TensorOptions options) {
34837return wrapper_CPU__tril_indices(row, col, offset, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
34838}
34839at::Tensor tril_indices(int64_t row, int64_t col, int64_t offset, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
34840return wrapper_CPU__tril_indices(row, col, offset, dtype, layout, device, pin_memory);
34841}
34842at::Tensor triu_indices(int64_t row, int64_t col, int64_t offset, at::TensorOptions options) {
34843return wrapper_CPU__triu_indices(row, col, offset, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
34844}
34845at::Tensor triu_indices(int64_t row, int64_t col, int64_t offset, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
34846return wrapper_CPU__triu_indices(row, col, offset, dtype, layout, device, pin_memory);
34847}
34848at::Tensor trace(const at::Tensor & self) {
34849return wrapper_CPU__trace(self);
34850}
at::Tensor ne(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CPU_ne_Scalar(self, other);
}
at::Tensor & ne_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_CPU_ne_out_Scalar_out(self, other, out);
}
at::Tensor & ne_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_CPU_ne_out_Scalar_out(self, other, out);
}
at::Tensor & ne_(at::Tensor & self, const at::Scalar & other) {
return wrapper_CPU_ne__Scalar(self, other);
}
at::Tensor ne(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_ne_Tensor(self, other);
}
at::Tensor & ne_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_ne_out_Tensor_out(self, other, out);
}
at::Tensor & ne_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_ne_out_Tensor_out(self, other, out);
}
at::Tensor & ne_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_ne__Tensor(self, other);
}
at::Tensor ge(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CPU_ge_Scalar(self, other);
}
at::Tensor & ge_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_CPU_ge_out_Scalar_out(self, other, out);
}
at::Tensor & ge_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_CPU_ge_out_Scalar_out(self, other, out);
}
at::Tensor & ge_(at::Tensor & self, const at::Scalar & other) {
return wrapper_CPU_ge__Scalar(self, other);
}
at::Tensor ge(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_ge_Tensor(self, other);
}
at::Tensor & ge_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_ge_out_Tensor_out(self, other, out);
}
at::Tensor & ge_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_ge_out_Tensor_out(self, other, out);
}
at::Tensor & ge_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_ge__Tensor(self, other);
}
at::Tensor le(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CPU_le_Scalar(self, other);
}
at::Tensor & le_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_CPU_le_out_Scalar_out(self, other, out);
}
at::Tensor & le_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_CPU_le_out_Scalar_out(self, other, out);
}
at::Tensor & le_(at::Tensor & self, const at::Scalar & other) {
return wrapper_CPU_le__Scalar(self, other);
}
at::Tensor le(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_le_Tensor(self, other);
}
at::Tensor & le_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_le_out_Tensor_out(self, other, out);
}
at::Tensor & le_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_le_out_Tensor_out(self, other, out);
}
at::Tensor & le_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_le__Tensor(self, other);
}
at::Tensor gt(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CPU_gt_Scalar(self, other);
}
at::Tensor & gt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_CPU_gt_out_Scalar_out(self, other, out);
}
at::Tensor & gt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_CPU_gt_out_Scalar_out(self, other, out);
}
at::Tensor & gt_(at::Tensor & self, const at::Scalar & other) {
return wrapper_CPU_gt__Scalar(self, other);
}
at::Tensor gt(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_gt_Tensor(self, other);
}
at::Tensor & gt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_gt_out_Tensor_out(self, other, out);
}
at::Tensor & gt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_gt_out_Tensor_out(self, other, out);
}
at::Tensor & gt_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_gt__Tensor(self, other);
}
at::Tensor lt(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CPU_lt_Scalar(self, other);
}
at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_CPU_lt_out_Scalar_out(self, other, out);
}
at::Tensor & lt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_CPU_lt_out_Scalar_out(self, other, out);
}
at::Tensor & lt_(at::Tensor & self, const at::Scalar & other) {
return wrapper_CPU_lt__Scalar(self, other);
}
at::Tensor lt(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_lt_Tensor(self, other);
}
at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_lt_out_Tensor_out(self, other, out);
}
at::Tensor & lt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_lt_out_Tensor_out(self, other, out);
}
at::Tensor & lt_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_lt__Tensor(self, other);
}
at::Tensor take(const at::Tensor & self, const at::Tensor & index) {
return wrapper_CPU__take(self, index);
}
at::Tensor & take_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & index) {
return wrapper_CPU_out_take_out(self, index, out);
}
at::Tensor & take_outf(const at::Tensor & self, const at::Tensor & index, at::Tensor & out) {
return wrapper_CPU_out_take_out(self, index, out);
}
at::Tensor index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index) {
return wrapper_CPU__index_select(self, dim, index);
}
at::Tensor & index_select_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index) {
return wrapper_CPU_out_index_select_out(self, dim, index, out);
}
at::Tensor & index_select_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out) {
return wrapper_CPU_out_index_select_out(self, dim, index, out);
}
at::Tensor masked_select(const at::Tensor & self, const at::Tensor & mask) {
return wrapper_CPU__masked_select(self, mask);
}
at::Tensor & masked_select_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask) {
return wrapper_CPU_out_masked_select_out(self, mask, out);
}
at::Tensor & masked_select_outf(const at::Tensor & self, const at::Tensor & mask, at::Tensor & out) {
return wrapper_CPU_out_masked_select_out(self, mask, out);
}
at::Tensor nonzero(const at::Tensor & self) {
return wrapper_CPU__nonzero(self);
}
at::Tensor & nonzero_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_out_nonzero_out(self, out);
}
at::Tensor & nonzero_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_out_nonzero_out(self, out);
}
at::Tensor gather(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
return wrapper_CPU_gather(self, dim, index, sparse_grad);
}
at::Tensor & gather_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
return wrapper_CPU_gather_out_out(self, dim, index, sparse_grad, out);
}
at::Tensor & gather_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) {
return wrapper_CPU_gather_out_out(self, dim, index, sparse_grad, out);
}
at::Tensor addcmul(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
return wrapper_CPU_addcmul(self, tensor1, tensor2, value);
}
at::Tensor & addcmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
return wrapper_CPU_addcmul_out_out(self, tensor1, tensor2, value, out);
}
at::Tensor & addcmul_outf(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out) {
return wrapper_CPU_addcmul_out_out(self, tensor1, tensor2, value, out);
}
at::Tensor & addcmul_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
return wrapper_CPU_addcmul_(self, tensor1, tensor2, value);
}
at::Tensor addcdiv(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
return wrapper_CPU_addcdiv(self, tensor1, tensor2, value);
}
at::Tensor & addcdiv_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
return wrapper_CPU_addcdiv_out_out(self, tensor1, tensor2, value, out);
}
at::Tensor & addcdiv_outf(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out) {
return wrapper_CPU_addcdiv_out_out(self, tensor1, tensor2, value, out);
}
at::Tensor & addcdiv_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
return wrapper_CPU_addcdiv_(self, tensor1, tensor2, value);
}
::std::tuple<at::Tensor,at::Tensor> triangular_solve(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular) {
return wrapper_CPU_triangular_solve(self, A, upper, transpose, unitriangular);
}
::std::tuple<at::Tensor &,at::Tensor &> triangular_solve_out(at::Tensor & X, at::Tensor & M, const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular) {
return wrapper_CPU_triangular_solve_out_X(self, A, upper, transpose, unitriangular, X, M);
}
::std::tuple<at::Tensor &,at::Tensor &> triangular_solve_outf(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular, at::Tensor & X, at::Tensor & M) {
return wrapper_CPU_triangular_solve_out_X(self, A, upper, transpose, unitriangular, X, M);
}
at::Tensor linalg_solve_triangular(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular) {
return wrapper_CPU__linalg_solve_triangular(self, B, upper, left, unitriangular);
}
at::Tensor & linalg_solve_triangular_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular) {
return wrapper_CPU_out_linalg_solve_triangular_out(self, B, upper, left, unitriangular, out);
}
at::Tensor & linalg_solve_triangular_outf(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular, at::Tensor & out) {
return wrapper_CPU_out_linalg_solve_triangular_out(self, B, upper, left, unitriangular, out);
}
at::Tensor cholesky(const at::Tensor & self, bool upper) {
return wrapper_CPU__cholesky(self, upper);
}
at::Tensor & cholesky_out(at::Tensor & out, const at::Tensor & self, bool upper) {
return wrapper_CPU_out_cholesky_out(self, upper, out);
}
at::Tensor & cholesky_outf(const at::Tensor & self, bool upper, at::Tensor & out) {
return wrapper_CPU_out_cholesky_out(self, upper, out);
}
at::Tensor _cholesky_solve_helper(const at::Tensor & self, const at::Tensor & A, bool upper) {
return wrapper_CPU___cholesky_solve_helper(self, A, upper);
}
at::Tensor cholesky_inverse(const at::Tensor & self, bool upper) {
return wrapper_CPU__cholesky_inverse(self, upper);
}
at::Tensor & cholesky_inverse_out(at::Tensor & out, const at::Tensor & self, bool upper) {
return wrapper_CPU_out_cholesky_inverse_out(self, upper, out);
}
at::Tensor & cholesky_inverse_outf(const at::Tensor & self, bool upper, at::Tensor & out) {
return wrapper_CPU_out_cholesky_inverse_out(self, upper, out);
}
::std::tuple<at::Tensor,at::Tensor> geqrf(const at::Tensor & self) {
return wrapper_CPU__geqrf(self);
}
::std::tuple<at::Tensor &,at::Tensor &> geqrf_out(at::Tensor & a, at::Tensor & tau, const at::Tensor & self) {
return wrapper_CPU_a_geqrf_out(self, a, tau);
}
::std::tuple<at::Tensor &,at::Tensor &> geqrf_outf(const at::Tensor & self, at::Tensor & a, at::Tensor & tau) {
return wrapper_CPU_a_geqrf_out(self, a, tau);
}
at::Tensor ormqr(const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose) {
return wrapper_CPU__ormqr(self, input2, input3, left, transpose);
}
at::Tensor & ormqr_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose) {
return wrapper_CPU_out_ormqr_out(self, input2, input3, left, transpose, out);
}
at::Tensor & ormqr_outf(const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose, at::Tensor & out) {
return wrapper_CPU_out_ormqr_out(self, input2, input3, left, transpose, out);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> lu_unpack(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots) {
return wrapper_CPU_lu_unpack(LU_data, LU_pivots, unpack_data, unpack_pivots);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> lu_unpack_out(at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots) {
return wrapper_CPU_lu_unpack_out_out(LU_data, LU_pivots, unpack_data, unpack_pivots, P, L, U);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> lu_unpack_outf(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots, at::Tensor & P, at::Tensor & L, at::Tensor & U) {
return wrapper_CPU_lu_unpack_out_out(LU_data, LU_pivots, unpack_data, unpack_pivots, P, L, U);
}
at::Tensor multinomial(const at::Tensor & self, int64_t num_samples, bool replacement, c10::optional<at::Generator> generator) {
return wrapper_CPU__multinomial(self, num_samples, replacement, generator);
}
at::Tensor & multinomial_out(at::Tensor & out, const at::Tensor & self, int64_t num_samples, bool replacement, c10::optional<at::Generator> generator) {
return wrapper_CPU_out_multinomial_out(self, num_samples, replacement, generator, out);
}
at::Tensor & multinomial_outf(const at::Tensor & self, int64_t num_samples, bool replacement, c10::optional<at::Generator> generator, at::Tensor & out) {
return wrapper_CPU_out_multinomial_out(self, num_samples, replacement, generator, out);
}
at::Tensor lgamma(const at::Tensor & self) {
return wrapper_CPU_lgamma(self);
}
at::Tensor & lgamma_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_lgamma_out_out(self, out);
}
at::Tensor & lgamma_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_lgamma_out_out(self, out);
}
at::Tensor & lgamma_(at::Tensor & self) {
return wrapper_CPU_lgamma_(self);
}
at::Tensor polygamma(int64_t n, const at::Tensor & self) {
return wrapper_CPU_polygamma(n, self);
}
at::Tensor & polygamma_out(at::Tensor & out, int64_t n, const at::Tensor & self) {
return wrapper_CPU_polygamma_out_out(n, self, out);
}
at::Tensor & polygamma_outf(int64_t n, const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_polygamma_out_out(n, self, out);
}
at::Tensor erfinv(const at::Tensor & self) {
return wrapper_CPU_erfinv(self);
}
at::Tensor & erfinv_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_erfinv_out_out(self, out);
}
at::Tensor & erfinv_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_erfinv_out_out(self, out);
}
at::Tensor & erfinv_(at::Tensor & self) {
return wrapper_CPU_erfinv_(self);
}
at::Tensor i0(const at::Tensor & self) {
return wrapper_CPU_i0(self);
}
at::Tensor & i0_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_i0_out_out(self, out);
}
at::Tensor & i0_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_i0_out_out(self, out);
}
at::Tensor & i0_(at::Tensor & self) {
return wrapper_CPU_i0_(self);
}
at::Tensor sign(const at::Tensor & self) {
return wrapper_CPU_sign(self);
}
at::Tensor & sign_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_sign_out_out(self, out);
}
at::Tensor & sign_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_sign_out_out(self, out);
}
at::Tensor & sign_(at::Tensor & self) {
return wrapper_CPU_sign_(self);
}
at::Tensor signbit(const at::Tensor & self) {
return wrapper_CPU_signbit(self);
}
at::Tensor & signbit_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_signbit_out_out(self, out);
}
at::Tensor & signbit_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_signbit_out_out(self, out);
}
at::Tensor atan2(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_atan2(self, other);
}
at::Tensor & atan2_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_atan2_out_out(self, other, out);
}
at::Tensor & atan2_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_atan2_out_out(self, other, out);
}
at::Tensor & atan2_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_atan2_(self, other);
}
at::Tensor histc(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max) {
return wrapper_CPU__histc(self, bins, min, max);
}
at::Tensor & histc_out(at::Tensor & out, const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max) {
return wrapper_CPU_out_histc_out(self, bins, min, max, out);
}
at::Tensor & histc_outf(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out) {
return wrapper_CPU_out_histc_out(self, bins, min, max, out);
}
::std::tuple<at::Tensor,at::Tensor> histogram(const at::Tensor & self, const at::Tensor & bins, const c10::optional<at::Tensor> & weight, bool density) {
return wrapper_CPU_bins_tensor_histogram(self, bins, weight, density);
}
::std::tuple<at::Tensor &,at::Tensor &> histogram_out(at::Tensor & hist, at::Tensor & bin_edges, const at::Tensor & self, const at::Tensor & bins, const c10::optional<at::Tensor> & weight, bool density) {
return wrapper_CPU_bins_tensor_out_histogram_out(self, bins, weight, density, hist, bin_edges);
}
::std::tuple<at::Tensor &,at::Tensor &> histogram_outf(const at::Tensor & self, const at::Tensor & bins, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges) {
return wrapper_CPU_bins_tensor_out_histogram_out(self, bins, weight, density, hist, bin_edges);
}
::std::tuple<at::Tensor,at::Tensor> histogram(const at::Tensor & self, int64_t bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
return wrapper_CPU_bin_ct_histogram(self, bins, range, weight, density);
}
::std::tuple<at::Tensor &,at::Tensor &> histogram_out(at::Tensor & hist, at::Tensor & bin_edges, const at::Tensor & self, int64_t bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
return wrapper_CPU_bin_ct_out_histogram_out(self, bins, range, weight, density, hist, bin_edges);
}
::std::tuple<at::Tensor &,at::Tensor &> histogram_outf(const at::Tensor & self, int64_t bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges) {
return wrapper_CPU_bin_ct_out_histogram_out(self, bins, range, weight, density, hist, bin_edges);
}
::std::vector<at::Tensor> _histogramdd_bin_edges(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
return wrapper_CPU___histogramdd_bin_edges(self, bins, range, weight, density);
}
at::Tensor _histogramdd_from_bin_cts(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
return wrapper_CPU___histogramdd_from_bin_cts(self, bins, range, weight, density);
}
at::Tensor _histogramdd_from_bin_tensors(const at::Tensor & self, at::TensorList bins, const c10::optional<at::Tensor> & weight, bool density) {
return wrapper_CPU___histogramdd_from_bin_tensors(self, bins, weight, density);
}
at::Tensor fmod(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_fmod_Tensor(self, other);
}
at::Tensor & fmod_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_fmod_out_Tensor_out(self, other, out);
}
at::Tensor & fmod_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_fmod_out_Tensor_out(self, other, out);
}
at::Tensor & fmod_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_fmod__Tensor(self, other);
}
at::Tensor hypot(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_hypot(self, other);
}
at::Tensor & hypot_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_hypot_out_out(self, other, out);
}
at::Tensor & hypot_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_hypot_out_out(self, other, out);
}
at::Tensor & hypot_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_hypot_(self, other);
}
at::Tensor igamma(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_igamma(self, other);
}
at::Tensor & igamma_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_igamma_out_out(self, other, out);
}
at::Tensor & igamma_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_igamma_out_out(self, other, out);
}
at::Tensor & igamma_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_igamma_(self, other);
}
at::Tensor igammac(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_igammac(self, other);
}
at::Tensor & igammac_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_igammac_out_out(self, other, out);
}
at::Tensor & igammac_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_igammac_out_out(self, other, out);
}
at::Tensor & igammac_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_igammac_(self, other);
}
at::Tensor nextafter(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_nextafter(self, other);
}
at::Tensor & nextafter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_nextafter_out_out(self, other, out);
}
at::Tensor & nextafter_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_nextafter_out_out(self, other, out);
}
at::Tensor & nextafter_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_nextafter_(self, other);
}
at::Tensor remainder(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_remainder_Tensor(self, other);
}
at::Tensor & remainder_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_remainder_out_Tensor_out(self, other, out);
}
at::Tensor & remainder_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_remainder_out_Tensor_out(self, other, out);
}
at::Tensor & remainder_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_remainder__Tensor(self, other);
}
at::Tensor remainder(const at::Scalar & self, const at::Tensor & other) {
return wrapper_CPU_Scalar_Tensor_remainder(self, other);
}
at::Tensor min(const at::Tensor & self) {
return wrapper_CPU__min(self);
}
at::Tensor fmin(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_fmin(self, other);
}
at::Tensor & fmin_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_fmin_out_out(self, other, out);
}
at::Tensor & fmin_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_fmin_out_out(self, other, out);
}
at::Tensor max(const at::Tensor & self) {
return wrapper_CPU__max(self);
}
at::Tensor & max_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_unary_out_max_out(self, out);
}
at::Tensor & max_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_unary_out_max_out(self, out);
}
at::Tensor fmax(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_fmax(self, other);
}
at::Tensor & fmax_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_fmax_out_out(self, other, out);
}
at::Tensor & fmax_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_fmax_out_out(self, other, out);
}
at::Tensor maximum(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_maximum(self, other);
}
at::Tensor & maximum_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_maximum_out_out(self, other, out);
}
at::Tensor & maximum_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_maximum_out_out(self, other, out);
}
at::Tensor minimum(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_minimum(self, other);
}
at::Tensor & minimum_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_minimum_out_out(self, other, out);
}
at::Tensor & minimum_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_minimum_out_out(self, other, out);
}
::std::tuple<at::Tensor,at::Tensor> sort(const at::Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending) {
return wrapper_CPU_sort_stable(self, stable, dim, descending);
}
::std::tuple<at::Tensor &,at::Tensor &> sort_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending) {
return wrapper_CPU_sort_out_values_stable(self, stable, dim, descending, values, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> sort_outf(const at::Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending, at::Tensor & values, at::Tensor & indices) {
return wrapper_CPU_sort_out_values_stable(self, stable, dim, descending, values, indices);
}
at::Tensor argsort(const at::Tensor & self, bool stable, int64_t dim, bool descending) {
return wrapper_CPU_stable_argsort(self, stable, dim, descending);
}
::std::tuple<at::Tensor,at::Tensor> topk(const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) {
return wrapper_CPU_topk(self, k, dim, largest, sorted);
}
::std::tuple<at::Tensor &,at::Tensor &> topk_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) {
return wrapper_CPU_topk_out_values(self, k, dim, largest, sorted, values, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> topk_outf(const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted, at::Tensor & values, at::Tensor & indices) {
return wrapper_CPU_topk_out_values(self, k, dim, largest, sorted, values, indices);
}
at::Tensor all(const at::Tensor & self) {
return wrapper_CPU_all(self);
}
at::Tensor & all_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_all_out_all_out(self, out);
}
at::Tensor & all_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_all_out_all_out(self, out);
}
at::Tensor any(const at::Tensor & self) {
return wrapper_CPU_any(self);
}
at::Tensor & any_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_any_out_all_out(self, out);
}
at::Tensor & any_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_any_out_all_out(self, out);
}
at::Tensor renorm(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
return wrapper_CPU_renorm(self, p, dim, maxnorm);
}
at::Tensor & renorm_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
return wrapper_CPU_renorm_out_out(self, p, dim, maxnorm, out);
}
at::Tensor & renorm_outf(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm, at::Tensor & out) {
return wrapper_CPU_renorm_out_out(self, p, dim, maxnorm, out);
}
at::Tensor & renorm_(at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
return wrapper_CPU_renorm_(self, p, dim, maxnorm);
}
at::Tensor unfold(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
return wrapper_CPU__unfold(self, dimension, size, step);
}
at::Tensor unfold_backward(const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
return wrapper_CPU__unfold_backward(grad_in, c10::fromIntArrayRefSlow(input_sizes), dim, size, step);
}
at::Tensor unfold_backward_symint(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
return wrapper_CPU__unfold_backward(grad_in, input_sizes, dim, size, step);
}
bool equal(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU__equal(self, other);
}
at::Tensor pow(const at::Tensor & self, const at::Tensor & exponent) {
return wrapper_CPU_pow_Tensor_Tensor(self, exponent);
}
at::Tensor & pow_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & exponent) {
return wrapper_CPU_pow_out_Tensor_Tensor_out(self, exponent, out);
}
at::Tensor & pow_outf(const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out) {
return wrapper_CPU_pow_out_Tensor_Tensor_out(self, exponent, out);
}
at::Tensor & pow_(at::Tensor & self, const at::Tensor & exponent) {
return wrapper_CPU_pow__Tensor(self, exponent);
}
at::Tensor pow(const at::Scalar & self, const at::Tensor & exponent) {
return wrapper_CPU_pow_Scalar(self, exponent);
}
at::Tensor & pow_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & exponent) {
return wrapper_CPU_pow_out_Scalar_out(self, exponent, out);
}
at::Tensor & pow_outf(const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out) {
return wrapper_CPU_pow_out_Scalar_out(self, exponent, out);
}
at::Tensor pow(const at::Tensor & self, const at::Scalar & exponent) {
return wrapper_CPU_pow_Tensor_Scalar(self, exponent);
}
at::Tensor & pow_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & exponent) {
return wrapper_CPU_pow_out_Tensor_Scalar_out(self, exponent, out);
}
at::Tensor & pow_outf(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) {
return wrapper_CPU_pow_out_Tensor_Scalar_out(self, exponent, out);
}
at::Tensor & pow_(at::Tensor & self, const at::Scalar & exponent) {
return wrapper_CPU_pow__Scalar(self, exponent);
}
at::Tensor & normal_(at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
return wrapper_CPU__normal_(self, mean, std, generator);
}
at::Tensor normal(const at::Tensor & mean, double std, c10::optional<at::Generator> generator) {
return wrapper_CPU_Tensor_float_normal(mean, std, generator);
}
at::Tensor & normal_out(at::Tensor & out, const at::Tensor & mean, double std, c10::optional<at::Generator> generator) {
return wrapper_CPU_Tensor_float_out_normal_out(mean, std, generator, out);
}
at::Tensor & normal_outf(const at::Tensor & mean, double std, c10::optional<at::Generator> generator, at::Tensor & out) {
return wrapper_CPU_Tensor_float_out_normal_out(mean, std, generator, out);
}
at::Tensor normal(double mean, const at::Tensor & std, c10::optional<at::Generator> generator) {
return wrapper_CPU_float_Tensor_normal(mean, std, generator);
}
at::Tensor & normal_out(at::Tensor & out, double mean, const at::Tensor & std, c10::optional<at::Generator> generator) {
return wrapper_CPU_float_Tensor_out_normal_out(mean, std, generator, out);
}
at::Tensor & normal_outf(double mean, const at::Tensor & std, c10::optional<at::Generator> generator, at::Tensor & out) {
return wrapper_CPU_float_Tensor_out_normal_out(mean, std, generator, out);
}
at::Tensor normal(const at::Tensor & mean, const at::Tensor & std, c10::optional<at::Generator> generator) {
return wrapper_CPU_Tensor_Tensor_normal(mean, std, generator);
}
at::Tensor & normal_out(at::Tensor & out, const at::Tensor & mean, const at::Tensor & std, c10::optional<at::Generator> generator) {
return wrapper_CPU_Tensor_Tensor_out_normal_out(mean, std, generator, out);
}
at::Tensor & normal_outf(const at::Tensor & mean, const at::Tensor & std, c10::optional<at::Generator> generator, at::Tensor & out) {
return wrapper_CPU_Tensor_Tensor_out_normal_out(mean, std, generator, out);
}
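// _foreach_* wrappers: apply the named elementwise op across a whole at::TensorList;
// the trailing-underscore overloads mutate the input list in place and return void.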
::std::vector<at::Tensor> _foreach_add(at::TensorList self, const at::Scalar & scalar) {
return wrapper_CPU_Scalar__foreach_add(self, scalar);
}
void _foreach_add_(at::TensorList self, const at::Scalar & scalar) {
return wrapper_CPU_Scalar__foreach_add_(self, scalar);
}
::std::vector<at::Tensor> _foreach_sub(at::TensorList self, const at::Scalar & scalar) {
return wrapper_CPU_Scalar__foreach_sub(self, scalar);
}
void _foreach_sub_(at::TensorList self, const at::Scalar & scalar) {
return wrapper_CPU_Scalar__foreach_sub_(self, scalar);
}
::std::vector<at::Tensor> _foreach_mul(at::TensorList self, const at::Scalar & scalar) {
return wrapper_CPU_Scalar__foreach_mul(self, scalar);
}
void _foreach_mul_(at::TensorList self, const at::Scalar & scalar) {
return wrapper_CPU_Scalar__foreach_mul_(self, scalar);
}
::std::vector<at::Tensor> _foreach_div(at::TensorList self, const at::Scalar & scalar) {
return wrapper_CPU_Scalar__foreach_div(self, scalar);
}
void _foreach_div_(at::TensorList self, const at::Scalar & scalar) {
return wrapper_CPU_Scalar__foreach_div_(self, scalar);
}
::std::vector<at::Tensor> _foreach_clamp_min(at::TensorList self, const at::Scalar & scalar) {
return wrapper_CPU_Scalar__foreach_clamp_min(self, scalar);
}
void _foreach_clamp_min_(at::TensorList self, const at::Scalar & scalar) {
return wrapper_CPU_Scalar__foreach_clamp_min_(self, scalar);
}
::std::vector<at::Tensor> _foreach_clamp_max(at::TensorList self, const at::Scalar & scalar) {
return wrapper_CPU_Scalar__foreach_clamp_max(self, scalar);
}
void _foreach_clamp_max_(at::TensorList self, const at::Scalar & scalar) {
return wrapper_CPU_Scalar__foreach_clamp_max_(self, scalar);
}
::std::vector<at::Tensor> _foreach_maximum(at::TensorList self, const at::Scalar & scalar) {
return wrapper_CPU_Scalar__foreach_maximum(self, scalar);
}
void _foreach_maximum_(at::TensorList self, const at::Scalar & scalar) {
return wrapper_CPU_Scalar__foreach_maximum_(self, scalar);
}
::std::vector<at::Tensor> _foreach_minimum(at::TensorList self, const at::Scalar & scalar) {
return wrapper_CPU_Scalar__foreach_minimum(self, scalar);
}
void _foreach_minimum_(at::TensorList self, const at::Scalar & scalar) {
return wrapper_CPU_Scalar__foreach_minimum_(self, scalar);
}
::std::vector<at::Tensor> _foreach_add(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
return wrapper_CPU_List__foreach_add(self, other, alpha);
}
void _foreach_add_(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
return wrapper_CPU_List__foreach_add_(self, other, alpha);
}
::std::vector<at::Tensor> _foreach_sub(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
return wrapper_CPU_List__foreach_sub(self, other, alpha);
}
void _foreach_sub_(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
return wrapper_CPU_List__foreach_sub_(self, other, alpha);
}
::std::vector<at::Tensor> _foreach_mul(at::TensorList self, at::TensorList other) {
return wrapper_CPU_List__foreach_mul(self, other);
}
void _foreach_mul_(at::TensorList self, at::TensorList other) {
return wrapper_CPU_List__foreach_mul_(self, other);
}
::std::vector<at::Tensor> _foreach_div(at::TensorList self, at::TensorList other) {
return wrapper_CPU_List__foreach_div(self, other);
}
void _foreach_div_(at::TensorList self, at::TensorList other) {
return wrapper_CPU_List__foreach_div_(self, other);
}
::std::vector<at::Tensor> _foreach_clamp_min(at::TensorList self, at::TensorList other) {
return wrapper_CPU_List__foreach_clamp_min(self, other);
}
void _foreach_clamp_min_(at::TensorList self, at::TensorList other) {
return wrapper_CPU_List__foreach_clamp_min_(self, other);
}
::std::vector<at::Tensor> _foreach_clamp_max(at::TensorList self, at::TensorList other) {
return wrapper_CPU_List__foreach_clamp_max(self, other);
}
void _foreach_clamp_max_(at::TensorList self, at::TensorList other) {
return wrapper_CPU_List__foreach_clamp_max_(self, other);
}
::std::vector<at::Tensor> _foreach_maximum(at::TensorList self, at::TensorList other) {
return wrapper_CPU_List__foreach_maximum(self, other);
}
void _foreach_maximum_(at::TensorList self, at::TensorList other) {
return wrapper_CPU_List__foreach_maximum_(self, other);
}
::std::vector<at::Tensor> _foreach_minimum(at::TensorList self, at::TensorList other) {
return wrapper_CPU_List__foreach_minimum(self, other);
}
void _foreach_minimum_(at::TensorList self, at::TensorList other) {
return wrapper_CPU_List__foreach_minimum_(self, other);
}
::std::vector<at::Tensor> _foreach_add(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
return wrapper_CPU_ScalarList__foreach_add(self, scalars);
}
void _foreach_add_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
return wrapper_CPU_ScalarList__foreach_add_(self, scalars);
}
::std::vector<at::Tensor> _foreach_sub(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
return wrapper_CPU_ScalarList__foreach_sub(self, scalars);
}
void _foreach_sub_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
return wrapper_CPU_ScalarList__foreach_sub_(self, scalars);
}
::std::vector<at::Tensor> _foreach_div(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
return wrapper_CPU_ScalarList__foreach_div(self, scalars);
}
void _foreach_div_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
return wrapper_CPU_ScalarList__foreach_div_(self, scalars);
}
::std::vector<at::Tensor> _foreach_mul(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
return wrapper_CPU_ScalarList__foreach_mul(self, scalars);
}
void _foreach_mul_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
return wrapper_CPU_ScalarList__foreach_mul_(self, scalars);
}
::std::vector<at::Tensor> _foreach_clamp_min(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
return wrapper_CPU_ScalarList__foreach_clamp_min(self, scalars);
}
void _foreach_clamp_min_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
return wrapper_CPU_ScalarList__foreach_clamp_min_(self, scalars);
}
::std::vector<at::Tensor> _foreach_clamp_max(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
return wrapper_CPU_ScalarList__foreach_clamp_max(self, scalars);
}
void _foreach_clamp_max_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
return wrapper_CPU_ScalarList__foreach_clamp_max_(self, scalars);
}
::std::vector<at::Tensor> _foreach_maximum(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
return wrapper_CPU_ScalarList__foreach_maximum(self, scalars);
}
void _foreach_maximum_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
return wrapper_CPU_ScalarList__foreach_maximum_(self, scalars);
}
::std::vector<at::Tensor> _foreach_minimum(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
return wrapper_CPU_ScalarList__foreach_minimum(self, scalars);
}
void _foreach_minimum_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
return wrapper_CPU_ScalarList__foreach_minimum_(self, scalars);
}
::std::vector<at::Tensor> _foreach_exp(at::TensorList self) {
return wrapper_CPU___foreach_exp(self);
}
void _foreach_exp_(at::TensorList self) {
return wrapper_CPU___foreach_exp_(self);
}
void _foreach_zero_(at::TensorList self) {
return wrapper_CPU___foreach_zero_(self);
}
::std::vector<at::Tensor> _foreach_sqrt(at::TensorList self) {
return wrapper_CPU___foreach_sqrt(self);
}
void _foreach_sqrt_(at::TensorList self) {
return wrapper_CPU___foreach_sqrt_(self);
}
::std::vector<at::Tensor> _foreach_abs(at::TensorList self) {
return wrapper_CPU___foreach_abs(self);
}
void _foreach_abs_(at::TensorList self) {
return wrapper_CPU___foreach_abs_(self);
}
::std::vector<at::Tensor> _foreach_acos(at::TensorList self) {
return wrapper_CPU___foreach_acos(self);
}
void _foreach_acos_(at::TensorList self) {
return wrapper_CPU___foreach_acos_(self);
}
::std::vector<at::Tensor> _foreach_asin(at::TensorList self) {
return wrapper_CPU___foreach_asin(self);
}
void _foreach_asin_(at::TensorList self) {
return wrapper_CPU___foreach_asin_(self);
}
::std::vector<at::Tensor> _foreach_atan(at::TensorList self) {
return wrapper_CPU___foreach_atan(self);
}
void _foreach_atan_(at::TensorList self) {
return wrapper_CPU___foreach_atan_(self);
}
::std::vector<at::Tensor> _foreach_ceil(at::TensorList self) {
return wrapper_CPU___foreach_ceil(self);
}
void _foreach_ceil_(at::TensorList self) {
return wrapper_CPU___foreach_ceil_(self);
}
::std::vector<at::Tensor> _foreach_cos(at::TensorList self) {
return wrapper_CPU___foreach_cos(self);
}
void _foreach_cos_(at::TensorList self) {
return wrapper_CPU___foreach_cos_(self);
}
::std::vector<at::Tensor> _foreach_cosh(at::TensorList self) {
return wrapper_CPU___foreach_cosh(self);
}
void _foreach_cosh_(at::TensorList self) {
return wrapper_CPU___foreach_cosh_(self);
}
::std::vector<at::Tensor> _foreach_erf(at::TensorList self) {
return wrapper_CPU___foreach_erf(self);
}
void _foreach_erf_(at::TensorList self) {
return wrapper_CPU___foreach_erf_(self);
}
::std::vector<at::Tensor> _foreach_erfc(at::TensorList self) {
return wrapper_CPU___foreach_erfc(self);
}
void _foreach_erfc_(at::TensorList self) {
return wrapper_CPU___foreach_erfc_(self);
}
::std::vector<at::Tensor> _foreach_expm1(at::TensorList self) {
return wrapper_CPU___foreach_expm1(self);
}
void _foreach_expm1_(at::TensorList self) {
return wrapper_CPU___foreach_expm1_(self);
}
::std::vector<at::Tensor> _foreach_floor(at::TensorList self) {
return wrapper_CPU___foreach_floor(self);
}
void _foreach_floor_(at::TensorList self) {
return wrapper_CPU___foreach_floor_(self);
}
::std::vector<at::Tensor> _foreach_log(at::TensorList self) {
return wrapper_CPU___foreach_log(self);
}
void _foreach_log_(at::TensorList self) {
return wrapper_CPU___foreach_log_(self);
}
::std::vector<at::Tensor> _foreach_log10(at::TensorList self) {
return wrapper_CPU___foreach_log10(self);
}
void _foreach_log10_(at::TensorList self) {
return wrapper_CPU___foreach_log10_(self);
}
::std::vector<at::Tensor> _foreach_log1p(at::TensorList self) {
return wrapper_CPU___foreach_log1p(self);
}
void _foreach_log1p_(at::TensorList self) {
return wrapper_CPU___foreach_log1p_(self);
}
::std::vector<at::Tensor> _foreach_log2(at::TensorList self) {
return wrapper_CPU___foreach_log2(self);
}
void _foreach_log2_(at::TensorList self) {
return wrapper_CPU___foreach_log2_(self);
}
::std::vector<at::Tensor> _foreach_neg(at::TensorList self) {
return wrapper_CPU___foreach_neg(self);
}
void _foreach_neg_(at::TensorList self) {
return wrapper_CPU___foreach_neg_(self);
}
::std::vector<at::Tensor> _foreach_tan(at::TensorList self) {
return wrapper_CPU___foreach_tan(self);
}
void _foreach_tan_(at::TensorList self) {
return wrapper_CPU___foreach_tan_(self);
}
::std::vector<at::Tensor> _foreach_tanh(at::TensorList self) {
return wrapper_CPU___foreach_tanh(self);
}
void _foreach_tanh_(at::TensorList self) {
return wrapper_CPU___foreach_tanh_(self);
}
::std::vector<at::Tensor> _foreach_sin(at::TensorList self) {
return wrapper_CPU___foreach_sin(self);
}
void _foreach_sin_(at::TensorList self) {
return wrapper_CPU___foreach_sin_(self);
}
::std::vector<at::Tensor> _foreach_sinh(at::TensorList self) {
return wrapper_CPU___foreach_sinh(self);
}
void _foreach_sinh_(at::TensorList self) {
return wrapper_CPU___foreach_sinh_(self);
}
::std::vector<at::Tensor> _foreach_round(at::TensorList self) {
return wrapper_CPU___foreach_round(self);
}
void _foreach_round_(at::TensorList self) {
return wrapper_CPU___foreach_round_(self);
}
::std::vector<at::Tensor> _foreach_lgamma(at::TensorList self) {
return wrapper_CPU___foreach_lgamma(self);
}
void _foreach_lgamma_(at::TensorList self) {
return wrapper_CPU___foreach_lgamma_(self);
}
::std::vector<at::Tensor> _foreach_frac(at::TensorList self) {
return wrapper_CPU___foreach_frac(self);
}
void _foreach_frac_(at::TensorList self) {
return wrapper_CPU___foreach_frac_(self);
}
::std::vector<at::Tensor> _foreach_reciprocal(at::TensorList self) {
return wrapper_CPU___foreach_reciprocal(self);
}
void _foreach_reciprocal_(at::TensorList self) {
return wrapper_CPU___foreach_reciprocal_(self);
}
::std::vector<at::Tensor> _foreach_sigmoid(at::TensorList self) {
return wrapper_CPU___foreach_sigmoid(self);
}
void _foreach_sigmoid_(at::TensorList self) {
return wrapper_CPU___foreach_sigmoid_(self);
}
::std::vector<at::Tensor> _foreach_trunc(at::TensorList self) {
return wrapper_CPU___foreach_trunc(self);
}
void _foreach_trunc_(at::TensorList self) {
return wrapper_CPU___foreach_trunc_(self);
}
::std::vector<at::Tensor> _foreach_addcdiv(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
return wrapper_CPU_Scalar__foreach_addcdiv(self, tensor1, tensor2, value);
}
void _foreach_addcdiv_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
return wrapper_CPU_Scalar__foreach_addcdiv_(self, tensor1, tensor2, value);
}
::std::vector<at::Tensor> _foreach_addcmul(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
return wrapper_CPU_Scalar__foreach_addcmul(self, tensor1, tensor2, value);
}
void _foreach_addcmul_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
return wrapper_CPU_Scalar__foreach_addcmul_(self, tensor1, tensor2, value);
}
::std::vector<at::Tensor> _foreach_addcdiv(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
return wrapper_CPU_ScalarList__foreach_addcdiv(self, tensor1, tensor2, scalars);
}
void _foreach_addcdiv_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
return wrapper_CPU_ScalarList__foreach_addcdiv_(self, tensor1, tensor2, scalars);
}
::std::vector<at::Tensor> _foreach_addcdiv(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
return wrapper_CPU_Tensor__foreach_addcdiv(self, tensor1, tensor2, scalars);
}
void _foreach_addcdiv_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
return wrapper_CPU_Tensor__foreach_addcdiv_(self, tensor1, tensor2, scalars);
}
::std::vector<at::Tensor> _foreach_addcmul(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
return wrapper_CPU_ScalarList__foreach_addcmul(self, tensor1, tensor2, scalars);
}
void _foreach_addcmul_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
return wrapper_CPU_ScalarList__foreach_addcmul_(self, tensor1, tensor2, scalars);
}
::std::vector<at::Tensor> _foreach_addcmul(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
return wrapper_CPU_Tensor__foreach_addcmul(self, tensor1, tensor2, scalars);
}
void _foreach_addcmul_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
return wrapper_CPU_Tensor__foreach_addcmul_(self, tensor1, tensor2, scalars);
}
::std::vector<at::Tensor> _foreach_norm(at::TensorList self, const at::Scalar & ord) {
return wrapper_CPU_Scalar__foreach_norm(self, ord);
}
::std::vector<at::Tensor> _foreach_lerp(at::TensorList self, at::TensorList tensors1, at::TensorList weights) {
return wrapper_CPU_List__foreach_lerp(self, tensors1, weights);
}
void _foreach_lerp_(at::TensorList self, at::TensorList tensors1, at::TensorList weights) {
return wrapper_CPU_List__foreach_lerp_(self, tensors1, weights);
}
::std::vector<at::Tensor> _foreach_lerp(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {
return wrapper_CPU_Scalar__foreach_lerp(self, tensors1, weight);
}
void _foreach_lerp_(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {
return wrapper_CPU_Scalar__foreach_lerp_(self, tensors1, weight);
}
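// Bucketization helpers: binary-search input values against sorted boundaries/sequences.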
at::Tensor bucketize(const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right) {
return wrapper_CPU_Tensor_bucketize(self, boundaries, out_int32, right);
}
at::Tensor & bucketize_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right) {
return wrapper_CPU_Tensor_out_bucketize_out(self, boundaries, out_int32, right, out);
}
at::Tensor & bucketize_outf(const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right, at::Tensor & out) {
return wrapper_CPU_Tensor_out_bucketize_out(self, boundaries, out_int32, right, out);
}
at::Tensor bucketize(const at::Scalar & self, const at::Tensor & boundaries, bool out_int32, bool right) {
return wrapper_CPU_Scalar_bucketize(self, boundaries, out_int32, right);
}
at::Tensor searchsorted(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter) {
return wrapper_CPU_Tensor_searchsorted(sorted_sequence, self, out_int32, right, side, sorter);
}
at::Tensor & searchsorted_out(at::Tensor & out, const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter) {
return wrapper_CPU_Tensor_out_searchsorted_out(sorted_sequence, self, out_int32, right, side, sorter, out);
}
at::Tensor & searchsorted_outf(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter, at::Tensor & out) {
return wrapper_CPU_Tensor_out_searchsorted_out(sorted_sequence, self, out_int32, right, side, sorter, out);
}
at::Tensor searchsorted(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter) {
return wrapper_CPU_Scalar_searchsorted(sorted_sequence, self, out_int32, right, side, sorter);
}
at::Tensor _convert_indices_from_coo_to_csr(const at::Tensor & self, int64_t size, bool out_int32) {
return wrapper_CPU__convert_indices_from_coo_to_csr(self, size, out_int32);
}
at::Tensor & _convert_indices_from_coo_to_csr_out(at::Tensor & out, const at::Tensor & self, int64_t size, bool out_int32) {
return wrapper_CPU__convert_indices_from_coo_to_csr_out_out(self, size, out_int32, out);
}
at::Tensor & _convert_indices_from_coo_to_csr_outf(const at::Tensor & self, int64_t size, bool out_int32, at::Tensor & out) {
return wrapper_CPU__convert_indices_from_coo_to_csr_out_out(self, size, out_int32, out);
}
at::Tensor _convert_indices_from_csr_to_coo(const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose) {
return wrapper_CPU__convert_indices_from_csr_to_coo(crow_indices, col_indices, out_int32, transpose);
}
at::Tensor & _convert_indices_from_csr_to_coo_out(at::Tensor & out, const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose) {
return wrapper_CPU__convert_indices_from_csr_to_coo_out_out(crow_indices, col_indices, out_int32, transpose, out);
}
at::Tensor & _convert_indices_from_csr_to_coo_outf(const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose, at::Tensor & out) {
return wrapper_CPU__convert_indices_from_csr_to_coo_out_out(crow_indices, col_indices, out_int32, transpose, out);
}
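// Loss-function wrappers (mse_loss, multi_margin_loss, multilabel_margin_loss,
// nll_loss, nll_loss2d, smooth_l1_loss, huber_loss). Each functional entry
// point forwards to the same wrapper_CPU_* kernel as its out=/backward forms.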
at::Tensor mse_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
return wrapper_CPU_mse_loss(self, target, reduction);
}
at::Tensor & mse_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
return wrapper_CPU_mse_loss_out_out(self, target, reduction, out);
}
at::Tensor & mse_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {
return wrapper_CPU_mse_loss_out_out(self, target, reduction, out);
}
at::Tensor mse_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
return wrapper_CPU__mse_loss_backward(grad_output, self, target, reduction);
}
at::Tensor & mse_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
return wrapper_CPU_grad_input_mse_loss_backward_out(grad_output, self, target, reduction, grad_input);
}
at::Tensor & mse_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input) {
return wrapper_CPU_grad_input_mse_loss_backward_out(grad_output, self, target, reduction, grad_input);
}
at::Tensor multi_margin_loss(const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction) {
return wrapper_CPU__multi_margin_loss(self, target, p, margin, weight, reduction);
}
at::Tensor & multi_margin_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction) {
return wrapper_CPU_out_multi_margin_loss_out(self, target, p, margin, weight, reduction, out);
}
at::Tensor & multi_margin_loss_outf(const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out) {
return wrapper_CPU_out_multi_margin_loss_out(self, target, p, margin, weight, reduction, out);
}
at::Tensor multi_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction) {
return wrapper_CPU__multi_margin_loss_backward(grad_output, self, target, p, margin, weight, reduction);
}
at::Tensor & multi_margin_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction) {
return wrapper_CPU_grad_input_multi_margin_loss_backward_out(grad_output, self, target, p, margin, weight, reduction, grad_input);
}
at::Tensor & multi_margin_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input) {
return wrapper_CPU_grad_input_multi_margin_loss_backward_out(grad_output, self, target, p, margin, weight, reduction, grad_input);
}
::std::tuple<at::Tensor,at::Tensor> multilabel_margin_loss_forward(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
return wrapper_CPU__multilabel_margin_loss_forward(self, target, reduction);
}
::std::tuple<at::Tensor &,at::Tensor &> multilabel_margin_loss_forward_out(at::Tensor & output, at::Tensor & is_target, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
return wrapper_CPU_output_multilabel_margin_loss_forward_out(self, target, reduction, output, is_target);
}
::std::tuple<at::Tensor &,at::Tensor &> multilabel_margin_loss_forward_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & output, at::Tensor & is_target) {
return wrapper_CPU_output_multilabel_margin_loss_forward_out(self, target, reduction, output, is_target);
}
at::Tensor multilabel_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) {
return wrapper_CPU__multilabel_margin_loss_backward(grad_output, self, target, reduction, is_target);
}
at::Tensor & multilabel_margin_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) {
return wrapper_CPU_grad_input_multilabel_margin_loss_backward_out(grad_output, self, target, reduction, is_target, grad_input);
}
at::Tensor & multilabel_margin_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target, at::Tensor & grad_input) {
return wrapper_CPU_grad_input_multilabel_margin_loss_backward_out(grad_output, self, target, reduction, is_target, grad_input);
}
::std::tuple<at::Tensor,at::Tensor> nll_loss_forward(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
return wrapper_CPU_nll_loss_forward(self, target, weight, reduction, ignore_index);
}
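// The *_symint overloads below take ignore_index as c10::SymInt; on this CPU
// path the value is materialized to a concrete int64_t via expect_int() before
// reusing the same underlying wrapper as the non-symint overloads.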
::std::tuple<at::Tensor,at::Tensor> nll_loss_forward_symint(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
return wrapper_CPU_nll_loss_forward(self, target, weight, reduction, ignore_index.expect_int());
}
::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
return wrapper_CPU_nll_loss_forward_out_output(self, target, weight, reduction, ignore_index, output, total_weight);
}
::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & output, at::Tensor & total_weight) {
return wrapper_CPU_nll_loss_forward_out_output(self, target, weight, reduction, ignore_index, output, total_weight);
}
::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_symint_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
return wrapper_CPU_nll_loss_forward_out_output(self, target, weight, reduction, ignore_index.expect_int(), output, total_weight);
}
::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_symint_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight) {
return wrapper_CPU_nll_loss_forward_out_output(self, target, weight, reduction, ignore_index.expect_int(), output, total_weight);
}
at::Tensor nll_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) {
return wrapper_CPU_nll_loss_backward(grad_output, self, target, weight, reduction, ignore_index, total_weight);
}
at::Tensor nll_loss_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
return wrapper_CPU_nll_loss_backward(grad_output, self, target, weight, reduction, ignore_index.expect_int(), total_weight);
}
at::Tensor & nll_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) {
return wrapper_CPU_nll_loss_backward_out_grad_input(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
}
at::Tensor & nll_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) {
return wrapper_CPU_nll_loss_backward_out_grad_input(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
}
at::Tensor & nll_loss_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
return wrapper_CPU_nll_loss_backward_out_grad_input(grad_output, self, target, weight, reduction, ignore_index.expect_int(), total_weight, grad_input);
}
at::Tensor & nll_loss_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) {
return wrapper_CPU_nll_loss_backward_out_grad_input(grad_output, self, target, weight, reduction, ignore_index.expect_int(), total_weight, grad_input);
}
::std::tuple<at::Tensor,at::Tensor> nll_loss2d_forward(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
return wrapper_CPU__nll_loss2d_forward(self, target, weight, reduction, ignore_index);
}
::std::tuple<at::Tensor,at::Tensor> nll_loss2d_forward_symint(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
return wrapper_CPU__nll_loss2d_forward(self, target, weight, reduction, ignore_index);
}
::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
return wrapper_CPU_output_nll_loss2d_forward_out(self, target, weight, reduction, ignore_index, output, total_weight);
}
::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & output, at::Tensor & total_weight) {
return wrapper_CPU_output_nll_loss2d_forward_out(self, target, weight, reduction, ignore_index, output, total_weight);
}
::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_symint_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
return wrapper_CPU_output_nll_loss2d_forward_out(self, target, weight, reduction, ignore_index, output, total_weight);
}
::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_symint_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight) {
return wrapper_CPU_output_nll_loss2d_forward_out(self, target, weight, reduction, ignore_index, output, total_weight);
}
at::Tensor nll_loss2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) {
return wrapper_CPU__nll_loss2d_backward(grad_output, self, target, weight, reduction, ignore_index, total_weight);
}
at::Tensor nll_loss2d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
return wrapper_CPU__nll_loss2d_backward(grad_output, self, target, weight, reduction, ignore_index, total_weight);
}
at::Tensor & nll_loss2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) {
return wrapper_CPU_grad_input_nll_loss2d_backward_out(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
}
at::Tensor & nll_loss2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) {
return wrapper_CPU_grad_input_nll_loss2d_backward_out(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
}
at::Tensor & nll_loss2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
return wrapper_CPU_grad_input_nll_loss2d_backward_out(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
}
at::Tensor & nll_loss2d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) {
return wrapper_CPU_grad_input_nll_loss2d_backward_out(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
}
at::Tensor smooth_l1_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
return wrapper_CPU_smooth_l1_loss(self, target, reduction, beta);
}
at::Tensor & smooth_l1_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
return wrapper_CPU_smooth_l1_loss_out_out(self, target, reduction, beta, out);
}
at::Tensor & smooth_l1_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta, at::Tensor & out) {
return wrapper_CPU_smooth_l1_loss_out_out(self, target, reduction, beta, out);
}
at::Tensor & smooth_l1_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
return wrapper_CPU_grad_input_smooth_l1_loss_backward_out(grad_output, self, target, reduction, beta, grad_input);
}
at::Tensor & smooth_l1_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta, at::Tensor & grad_input) {
return wrapper_CPU_grad_input_smooth_l1_loss_backward_out(grad_output, self, target, reduction, beta, grad_input);
}
at::Tensor huber_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) {
return wrapper_CPU__huber_loss(self, target, reduction, delta);
}
at::Tensor & huber_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) {
return wrapper_CPU_out_huber_loss_out(self, target, reduction, delta, out);
}
at::Tensor & huber_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out) {
return wrapper_CPU_out_huber_loss_out(self, target, reduction, delta, out);
}
at::Tensor & huber_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) {
return wrapper_CPU_out_huber_loss_backward_out(grad_output, self, target, reduction, delta, grad_input);
}
at::Tensor & huber_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & grad_input) {
return wrapper_CPU_out_huber_loss_backward_out(grad_output, self, target, reduction, delta, grad_input);
}
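// Activation-function wrappers: elu, glu, hardsigmoid, hardtanh, hardswish,
// leaky_relu, log_sigmoid, rrelu_with_noise, softplus and softshrink, together
// with their in-place, out= and backward variants.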
at::Tensor elu(const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) {
return wrapper_CPU_elu(self, alpha, scale, input_scale);
}
at::Tensor & elu_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) {
return wrapper_CPU_elu_out_out(self, alpha, scale, input_scale, out);
}
at::Tensor & elu_outf(const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, at::Tensor & out) {
return wrapper_CPU_elu_out_out(self, alpha, scale, input_scale, out);
}
at::Tensor & elu_(at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) {
return wrapper_CPU_elu_(self, alpha, scale, input_scale);
}
at::Tensor elu_backward(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result) {
return wrapper_CPU_elu_backward(grad_output, alpha, scale, input_scale, is_result, self_or_result);
}
at::Tensor & elu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result) {
return wrapper_CPU_elu_backward_out_grad_input(grad_output, alpha, scale, input_scale, is_result, self_or_result, grad_input);
}
at::Tensor & elu_backward_outf(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result, at::Tensor & grad_input) {
return wrapper_CPU_elu_backward_out_grad_input(grad_output, alpha, scale, input_scale, is_result, self_or_result, grad_input);
}
at::Tensor glu(const at::Tensor & self, int64_t dim) {
return wrapper_CPU_glu(self, dim);
}
at::Tensor & glu_out(at::Tensor & out, const at::Tensor & self, int64_t dim) {
return wrapper_CPU_glu_out_out(self, dim, out);
}
at::Tensor & glu_outf(const at::Tensor & self, int64_t dim, at::Tensor & out) {
return wrapper_CPU_glu_out_out(self, dim, out);
}
at::Tensor glu_backward(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) {
return wrapper_CPU__glu_backward(grad_output, self, dim);
}
at::Tensor & glu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) {
return wrapper_CPU_grad_input_glu_backward_out(grad_output, self, dim, grad_input);
}
at::Tensor & glu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, at::Tensor & grad_input) {
return wrapper_CPU_grad_input_glu_backward_out(grad_output, self, dim, grad_input);
}
at::Tensor glu_jvp(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim) {
return wrapper_CPU__glu_jvp(glu, x, dx, dim);
}
at::Tensor glu_backward_jvp(const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim) {
return wrapper_CPU__glu_backward_jvp(grad_x, grad_glu, x, dgrad_glu, dx, dim);
}
at::Tensor hardsigmoid(const at::Tensor & self) {
return wrapper_CPU_hardsigmoid(self);
}
at::Tensor & hardsigmoid_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_hardsigmoid_out_out(self, out);
}
at::Tensor & hardsigmoid_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_hardsigmoid_out_out(self, out);
}
at::Tensor & hardsigmoid_(at::Tensor & self) {
return wrapper_CPU_hardsigmoid_(self);
}
at::Tensor hardsigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self) {
return wrapper_CPU_hardsigmoid_backward(grad_output, self);
}
at::Tensor & hardsigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self) {
return wrapper_CPU_hardsigmoid_backward_out_grad_input(grad_output, self, grad_input);
}
at::Tensor & hardsigmoid_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) {
return wrapper_CPU_hardsigmoid_backward_out_grad_input(grad_output, self, grad_input);
}
at::Tensor hardtanh(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
return wrapper_CPU__hardtanh(self, min_val, max_val);
}
at::Tensor & hardtanh_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
return wrapper_CPU_out_hardtanh_out(self, min_val, max_val, out);
}
at::Tensor & hardtanh_outf(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out) {
return wrapper_CPU_out_hardtanh_out(self, min_val, max_val, out);
}
at::Tensor & hardtanh_(at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
return wrapper_CPU__hardtanh_(self, min_val, max_val);
}
at::Tensor hardtanh_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
return wrapper_CPU__hardtanh_backward(grad_output, self, min_val, max_val);
}
at::Tensor & hardtanh_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
return wrapper_CPU_grad_input_hardtanh_backward_out(grad_output, self, min_val, max_val, grad_input);
}
at::Tensor & hardtanh_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & grad_input) {
return wrapper_CPU_grad_input_hardtanh_backward_out(grad_output, self, min_val, max_val, grad_input);
}
at::Tensor hardswish(const at::Tensor & self) {
return wrapper_CPU__hardswish(self);
}
at::Tensor & hardswish_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_out_hardswish_out(self, out);
}
at::Tensor & hardswish_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_out_hardswish_out(self, out);
}
at::Tensor & hardswish_(at::Tensor & self) {
return wrapper_CPU__hardswish_(self);
}
at::Tensor hardswish_backward(const at::Tensor & grad_output, const at::Tensor & self) {
return wrapper_CPU__hardswish_backward(grad_output, self);
}
at::Tensor leaky_relu(const at::Tensor & self, const at::Scalar & negative_slope) {
return wrapper_CPU_leaky_relu(self, negative_slope);
}
at::Tensor & leaky_relu_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & negative_slope) {
return wrapper_CPU_leaky_relu_out_out(self, negative_slope, out);
}
at::Tensor & leaky_relu_outf(const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out) {
return wrapper_CPU_leaky_relu_out_out(self, negative_slope, out);
}
at::Tensor & leaky_relu_(at::Tensor & self, const at::Scalar & negative_slope) {
return wrapper_CPU_leaky_relu_(self, negative_slope);
}
at::Tensor leaky_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) {
return wrapper_CPU_leaky_relu_backward(grad_output, self, negative_slope, self_is_result);
}
at::Tensor & leaky_relu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) {
return wrapper_CPU_leaky_relu_backward_out_grad_input(grad_output, self, negative_slope, self_is_result, grad_input);
}
at::Tensor & leaky_relu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input) {
return wrapper_CPU_leaky_relu_backward_out_grad_input(grad_output, self, negative_slope, self_is_result, grad_input);
}
::std::tuple<at::Tensor,at::Tensor> log_sigmoid_forward(const at::Tensor & self) {
return wrapper_CPU__log_sigmoid_forward(self);
}
::std::tuple<at::Tensor &,at::Tensor &> log_sigmoid_forward_out(at::Tensor & output, at::Tensor & buffer, const at::Tensor & self) {
return wrapper_CPU_output_log_sigmoid_forward_out(self, output, buffer);
}
::std::tuple<at::Tensor &,at::Tensor &> log_sigmoid_forward_outf(const at::Tensor & self, at::Tensor & output, at::Tensor & buffer) {
return wrapper_CPU_output_log_sigmoid_forward_out(self, output, buffer);
}
at::Tensor log_sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) {
return wrapper_CPU__log_sigmoid_backward(grad_output, self, buffer);
}
at::Tensor & log_sigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) {
return wrapper_CPU_grad_input_log_sigmoid_backward_out(grad_output, self, buffer, grad_input);
}
at::Tensor & log_sigmoid_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input) {
return wrapper_CPU_grad_input_log_sigmoid_backward_out(grad_output, self, buffer, grad_input);
}
at::Tensor rrelu_with_noise(const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {
return wrapper_CPU__rrelu_with_noise(self, noise, lower, upper, training, generator);
}
at::Tensor & rrelu_with_noise_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {
return wrapper_CPU_out_rrelu_with_noise_out(self, noise, lower, upper, training, generator, out);
}
at::Tensor & rrelu_with_noise_outf(const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator, at::Tensor & out) {
return wrapper_CPU_out_rrelu_with_noise_out(self, noise, lower, upper, training, generator, out);
}
at::Tensor & rrelu_with_noise_(at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {
return wrapper_CPU__rrelu_with_noise_(self, noise, lower, upper, training, generator);
}
at::Tensor softplus(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
return wrapper_CPU_softplus(self, beta, threshold);
}
at::Tensor & softplus_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
return wrapper_CPU_softplus_out_out(self, beta, threshold, out);
}
at::Tensor & softplus_outf(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & out) {
return wrapper_CPU_softplus_out_out(self, beta, threshold, out);
}
at::Tensor softplus_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
return wrapper_CPU_softplus_backward(grad_output, self, beta, threshold);
}
at::Tensor & softplus_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
return wrapper_CPU_softplus_backward_out_grad_input(grad_output, self, beta, threshold, grad_input);
}
at::Tensor & softplus_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & grad_input) {
return wrapper_CPU_softplus_backward_out_grad_input(grad_output, self, beta, threshold, grad_input);
}
at::Tensor softshrink(const at::Tensor & self, const at::Scalar & lambd) {
return wrapper_CPU_softshrink(self, lambd);
}
at::Tensor & softshrink_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & lambd) {
return wrapper_CPU_softshrink_out_out(self, lambd, out);
}
at::Tensor & softshrink_outf(const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out) {
return wrapper_CPU_softshrink_out_out(self, lambd, out);
}
at::Tensor softshrink_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd) {
return wrapper_CPU_softshrink_backward(grad_output, self, lambd);
}
at::Tensor & softshrink_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd) {
return wrapper_CPU_softshrink_backward_out_grad_input(grad_output, self, lambd, grad_input);
}
at::Tensor & softshrink_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input) {
return wrapper_CPU_softshrink_backward_out_grad_input(grad_output, self, lambd, grad_input);
}
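// Pooling wrappers (adaptive, average, fractional and max pooling, plus
// max_unpool). For adaptive_avg_pool{2,3}d the IntArrayRef entry points widen
// output_size to SymIntArrayRef via c10::fromIntArrayRefSlow() because the
// underlying CPU wrapper is declared in terms of symbolic sizes, while the
// *_symint entry points pass it through unchanged.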
at::Tensor & adaptive_avg_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size) {
return wrapper_CPU_out_adaptive_avg_pool2d_out(self, c10::fromIntArrayRefSlow(output_size), out);
}
at::Tensor & adaptive_avg_pool2d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) {
return wrapper_CPU_out_adaptive_avg_pool2d_out(self, c10::fromIntArrayRefSlow(output_size), out);
}
at::Tensor & adaptive_avg_pool2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size) {
return wrapper_CPU_out_adaptive_avg_pool2d_out(self, output_size, out);
}
at::Tensor & adaptive_avg_pool2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
return wrapper_CPU_out_adaptive_avg_pool2d_out(self, output_size, out);
}
at::Tensor _adaptive_avg_pool2d(const at::Tensor & self, at::IntArrayRef output_size) {
return wrapper_CPU___adaptive_avg_pool2d(self, c10::fromIntArrayRefSlow(output_size));
}
at::Tensor _adaptive_avg_pool2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size) {
return wrapper_CPU___adaptive_avg_pool2d(self, output_size);
}
at::Tensor _adaptive_avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self) {
return wrapper_CPU___adaptive_avg_pool2d_backward(grad_output, self);
}
at::Tensor & adaptive_avg_pool3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size) {
return wrapper_CPU_out_adaptive_avg_pool3d_out(self, c10::fromIntArrayRefSlow(output_size), out);
}
at::Tensor & adaptive_avg_pool3d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) {
return wrapper_CPU_out_adaptive_avg_pool3d_out(self, c10::fromIntArrayRefSlow(output_size), out);
}
at::Tensor & adaptive_avg_pool3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size) {
return wrapper_CPU_out_adaptive_avg_pool3d_out(self, output_size, out);
}
at::Tensor & adaptive_avg_pool3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
return wrapper_CPU_out_adaptive_avg_pool3d_out(self, output_size, out);
}
at::Tensor _adaptive_avg_pool3d(const at::Tensor & self, at::IntArrayRef output_size) {
return wrapper_CPU___adaptive_avg_pool3d(self, c10::fromIntArrayRefSlow(output_size));
}
at::Tensor _adaptive_avg_pool3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size) {
return wrapper_CPU___adaptive_avg_pool3d(self, output_size);
}
at::Tensor & adaptive_avg_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self) {
return wrapper_CPU_grad_input_adaptive_avg_pool3d_backward_out(grad_output, self, grad_input);
}
at::Tensor & adaptive_avg_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) {
return wrapper_CPU_grad_input_adaptive_avg_pool3d_backward_out(grad_output, self, grad_input);
}
at::Tensor _adaptive_avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self) {
return wrapper_CPU___adaptive_avg_pool3d_backward(grad_output, self);
}
::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool2d(const at::Tensor & self, at::IntArrayRef output_size) {
return wrapper_CPU_adaptive_max_pool2d(self, output_size);
}
::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef output_size) {
return wrapper_CPU_adaptive_max_pool2d_out_out(self, output_size, out, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices) {
return wrapper_CPU_adaptive_max_pool2d_out_out(self, output_size, out, indices);
}
at::Tensor adaptive_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
return wrapper_CPU_adaptive_max_pool2d_backward(grad_output, self, indices);
}
at::Tensor & adaptive_max_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
return wrapper_CPU_adaptive_max_pool2d_backward_out_grad_input(grad_output, self, indices, grad_input);
}
at::Tensor & adaptive_max_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input) {
return wrapper_CPU_adaptive_max_pool2d_backward_out_grad_input(grad_output, self, indices, grad_input);
}
::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool3d(const at::Tensor & self, at::IntArrayRef output_size) {
return wrapper_CPU_adaptive_max_pool3d(self, output_size);
}
::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool3d_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef output_size) {
return wrapper_CPU_adaptive_max_pool3d_out_out(self, output_size, out, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool3d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices) {
return wrapper_CPU_adaptive_max_pool3d_out_out(self, output_size, out, indices);
}
at::Tensor adaptive_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
return wrapper_CPU_adaptive_max_pool3d_backward(grad_output, self, indices);
}
at::Tensor & adaptive_max_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
return wrapper_CPU_adaptive_max_pool3d_backward_out_grad_input(grad_output, self, indices, grad_input);
}
at::Tensor & adaptive_max_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input) {
return wrapper_CPU_adaptive_max_pool3d_backward_out_grad_input(grad_output, self, indices, grad_input);
}
at::Tensor avg_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
return wrapper_CPU_avg_pool2d(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
}
at::Tensor & avg_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
return wrapper_CPU_avg_pool2d_out_out(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out);
}
at::Tensor & avg_pool2d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & out) {
return wrapper_CPU_avg_pool2d_out_out(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out);
}
at::Tensor avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
return wrapper_CPU_avg_pool2d_backward(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
}
at::Tensor & avg_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
return wrapper_CPU_avg_pool2d_backward_out_grad_input(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input);
}
at::Tensor & avg_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & grad_input) {
return wrapper_CPU_avg_pool2d_backward_out_grad_input(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input);
}
at::Tensor avg_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
return wrapper_CPU_avg_pool3d(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
}
at::Tensor & avg_pool3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
return wrapper_CPU_avg_pool3d_out_out(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out);
}
at::Tensor & avg_pool3d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & out) {
return wrapper_CPU_avg_pool3d_out_out(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out);
}
at::Tensor avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
return wrapper_CPU_avg_pool3d_backward(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
}
at::Tensor & avg_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
return wrapper_CPU_avg_pool3d_backward_out_grad_input(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input);
}
at::Tensor & avg_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & grad_input) {
return wrapper_CPU_avg_pool3d_backward_out_grad_input(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input);
}
::std::tuple<at::Tensor,at::Tensor> fractional_max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
return wrapper_CPU_fractional_max_pool2d(self, kernel_size, output_size, random_samples);
}
::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool2d_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
return wrapper_CPU_fractional_max_pool2d_out_output(self, kernel_size, output_size, random_samples, output, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool2d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices) {
return wrapper_CPU_fractional_max_pool2d_out_output(self, kernel_size, output_size, random_samples, output, indices);
}
at::Tensor fractional_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
return wrapper_CPU_fractional_max_pool2d_backward(grad_output, self, kernel_size, output_size, indices);
}
at::Tensor & fractional_max_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
return wrapper_CPU_fractional_max_pool2d_backward_out_grad_input(grad_output, self, kernel_size, output_size, indices, grad_input);
}
at::Tensor & fractional_max_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input) {
return wrapper_CPU_fractional_max_pool2d_backward_out_grad_input(grad_output, self, kernel_size, output_size, indices, grad_input);
}
::std::tuple<at::Tensor,at::Tensor> fractional_max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
return wrapper_CPU_fractional_max_pool3d(self, kernel_size, output_size, random_samples);
}
::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool3d_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
return wrapper_CPU_fractional_max_pool3d_out_output(self, kernel_size, output_size, random_samples, output, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool3d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices) {
return wrapper_CPU_fractional_max_pool3d_out_output(self, kernel_size, output_size, random_samples, output, indices);
}
at::Tensor fractional_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
return wrapper_CPU__fractional_max_pool3d_backward(grad_output, self, kernel_size, output_size, indices);
}
at::Tensor & fractional_max_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
return wrapper_CPU_grad_input_fractional_max_pool3d_backward_out(grad_output, self, kernel_size, output_size, indices, grad_input);
}
at::Tensor & fractional_max_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input) {
return wrapper_CPU_grad_input_fractional_max_pool3d_backward_out(grad_output, self, kernel_size, output_size, indices, grad_input);
}
::std::tuple<at::Tensor,at::Tensor> max_pool2d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
return wrapper_CPU_max_pool2d_with_indices(self, kernel_size, stride, padding, dilation, ceil_mode);
}
::std::tuple<at::Tensor &,at::Tensor &> max_pool2d_with_indices_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
return wrapper_CPU_max_pool2d_with_indices_out_out(self, kernel_size, stride, padding, dilation, ceil_mode, out, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> max_pool2d_with_indices_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices) {
return wrapper_CPU_max_pool2d_with_indices_out_out(self, kernel_size, stride, padding, dilation, ceil_mode, out, indices);
}
at::Tensor max_pool2d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
return wrapper_CPU_max_pool2d_with_indices_backward(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
}
at::Tensor & max_pool2d_with_indices_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
return wrapper_CPU_max_pool2d_with_indices_backward_out_grad_input(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input);
}
at::Tensor & max_pool2d_with_indices_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input) {
return wrapper_CPU_max_pool2d_with_indices_backward_out_grad_input(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input);
}
::std::tuple<at::Tensor,at::Tensor> max_pool3d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
return wrapper_CPU__max_pool3d_with_indices(self, kernel_size, stride, padding, dilation, ceil_mode);
}
::std::tuple<at::Tensor &,at::Tensor &> max_pool3d_with_indices_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
return wrapper_CPU_out_max_pool3d_with_indices_out(self, kernel_size, stride, padding, dilation, ceil_mode, out, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> max_pool3d_with_indices_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices) {
return wrapper_CPU_out_max_pool3d_with_indices_out(self, kernel_size, stride, padding, dilation, ceil_mode, out, indices);
}
at::Tensor max_pool3d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
return wrapper_CPU__max_pool3d_with_indices_backward(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
}
at::Tensor & max_pool3d_with_indices_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
return wrapper_CPU_grad_input_max_pool3d_with_indices_backward_out(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input);
}
at::Tensor & max_pool3d_with_indices_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input) {
return wrapper_CPU_grad_input_max_pool3d_with_indices_backward_out(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input);
}
at::Tensor max_unpool2d(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size) {
return wrapper_CPU__max_unpool2d(self, indices, output_size);
}
at::Tensor & max_unpool2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size) {
return wrapper_CPU_out_max_unpool2d_out(self, indices, output_size, out);
}
at::Tensor & max_unpool2d_outf(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::Tensor & out) {
return wrapper_CPU_out_max_unpool2d_out(self, indices, output_size, out);
}
at::Tensor max_unpool3d(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) {
return wrapper_CPU__max_unpool3d(self, indices, output_size, stride, padding);
}
at::Tensor & max_unpool3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) {
return wrapper_CPU_out_max_unpool3d_out(self, indices, output_size, stride, padding, out);
}
at::Tensor & max_unpool3d_outf(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) {
return wrapper_CPU_out_max_unpool3d_out(self, indices, output_size, stride, padding, out);
}
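// Reflection-padding wrappers. For reflection_pad1d/3d the *_symint overloads
// lower c10::SymIntArrayRef padding to a concrete at::IntArrayRef with
// C10_AS_INTARRAYREF_SLOW() before dispatching, whereas reflection_pad2d keeps
// symbolic padding end to end and its IntArrayRef overloads widen with
// c10::fromIntArrayRefSlow().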
36441at::Tensor reflection_pad1d(const at::Tensor & self, at::IntArrayRef padding) {
36442return wrapper_CPU_reflection_pad1d(self, padding);
36443}
36444at::Tensor reflection_pad1d_symint(const at::Tensor & self, c10::SymIntArrayRef padding) {
36445return wrapper_CPU_reflection_pad1d(self, C10_AS_INTARRAYREF_SLOW(padding));
36446}
36447at::Tensor & reflection_pad1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) {
36448return wrapper_CPU_reflection_pad1d_out_out(self, padding, out);
36449}
36450at::Tensor & reflection_pad1d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) {
36451return wrapper_CPU_reflection_pad1d_out_out(self, padding, out);
36452}
36453at::Tensor & reflection_pad1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) {
36454return wrapper_CPU_reflection_pad1d_out_out(self, C10_AS_INTARRAYREF_SLOW(padding), out);
36455}
36456at::Tensor & reflection_pad1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
36457return wrapper_CPU_reflection_pad1d_out_out(self, C10_AS_INTARRAYREF_SLOW(padding), out);
36458}
36459at::Tensor reflection_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
36460return wrapper_CPU_reflection_pad1d_backward(grad_output, self, padding);
36461}
36462at::Tensor reflection_pad1d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
36463return wrapper_CPU_reflection_pad1d_backward(grad_output, self, C10_AS_INTARRAYREF_SLOW(padding));
36464}
36465at::Tensor & reflection_pad1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
36466return wrapper_CPU_reflection_pad1d_backward_out_grad_input(grad_output, self, padding, grad_input);
36467}
36468at::Tensor & reflection_pad1d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
36469return wrapper_CPU_reflection_pad1d_backward_out_grad_input(grad_output, self, padding, grad_input);
36470}
36471at::Tensor & reflection_pad1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
36472return wrapper_CPU_reflection_pad1d_backward_out_grad_input(grad_output, self, C10_AS_INTARRAYREF_SLOW(padding), grad_input);
36473}
36474at::Tensor & reflection_pad1d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
36475return wrapper_CPU_reflection_pad1d_backward_out_grad_input(grad_output, self, C10_AS_INTARRAYREF_SLOW(padding), grad_input);
36476}
at::Tensor reflection_pad2d(const at::Tensor & self, at::IntArrayRef padding) {
return wrapper_CPU__reflection_pad2d(self, c10::fromIntArrayRefSlow(padding));
}
at::Tensor reflection_pad2d_symint(const at::Tensor & self, c10::SymIntArrayRef padding) {
return wrapper_CPU__reflection_pad2d(self, padding);
}
at::Tensor & reflection_pad2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) {
return wrapper_CPU_out_reflection_pad2d_out(self, c10::fromIntArrayRefSlow(padding), out);
}
at::Tensor & reflection_pad2d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) {
return wrapper_CPU_out_reflection_pad2d_out(self, c10::fromIntArrayRefSlow(padding), out);
}
at::Tensor & reflection_pad2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) {
return wrapper_CPU_out_reflection_pad2d_out(self, padding, out);
}
at::Tensor & reflection_pad2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
return wrapper_CPU_out_reflection_pad2d_out(self, padding, out);
}
at::Tensor reflection_pad2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
return wrapper_CPU__reflection_pad2d_backward(grad_output, self, c10::fromIntArrayRefSlow(padding));
}
at::Tensor reflection_pad2d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
return wrapper_CPU__reflection_pad2d_backward(grad_output, self, padding);
}
at::Tensor & reflection_pad2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
return wrapper_CPU_grad_input_reflection_pad2d_backward_out(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
}
at::Tensor & reflection_pad2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
return wrapper_CPU_grad_input_reflection_pad2d_backward_out(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
}
at::Tensor & reflection_pad2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
return wrapper_CPU_grad_input_reflection_pad2d_backward_out(grad_output, self, padding, grad_input);
}
at::Tensor & reflection_pad2d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
return wrapper_CPU_grad_input_reflection_pad2d_backward_out(grad_output, self, padding, grad_input);
}
at::Tensor reflection_pad3d(const at::Tensor & self, at::IntArrayRef padding) {
return wrapper_CPU_reflection_pad3d(self, padding);
}
at::Tensor reflection_pad3d_symint(const at::Tensor & self, c10::SymIntArrayRef padding) {
return wrapper_CPU_reflection_pad3d(self, C10_AS_INTARRAYREF_SLOW(padding));
}
at::Tensor & reflection_pad3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) {
return wrapper_CPU_reflection_pad3d_out_out(self, padding, out);
}
at::Tensor & reflection_pad3d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) {
return wrapper_CPU_reflection_pad3d_out_out(self, padding, out);
}
at::Tensor & reflection_pad3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) {
return wrapper_CPU_reflection_pad3d_out_out(self, C10_AS_INTARRAYREF_SLOW(padding), out);
}
at::Tensor & reflection_pad3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
return wrapper_CPU_reflection_pad3d_out_out(self, C10_AS_INTARRAYREF_SLOW(padding), out);
}
at::Tensor reflection_pad3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
return wrapper_CPU_reflection_pad3d_backward(grad_output, self, padding);
}
at::Tensor reflection_pad3d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
return wrapper_CPU_reflection_pad3d_backward(grad_output, self, C10_AS_INTARRAYREF_SLOW(padding));
}
at::Tensor & reflection_pad3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
return wrapper_CPU_reflection_pad3d_backward_out_grad_input(grad_output, self, padding, grad_input);
}
at::Tensor & reflection_pad3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
return wrapper_CPU_reflection_pad3d_backward_out_grad_input(grad_output, self, padding, grad_input);
}
at::Tensor & reflection_pad3d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
return wrapper_CPU_reflection_pad3d_backward_out_grad_input(grad_output, self, C10_AS_INTARRAYREF_SLOW(padding), grad_input);
}
at::Tensor & reflection_pad3d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
return wrapper_CPU_reflection_pad3d_backward_out_grad_input(grad_output, self, C10_AS_INTARRAYREF_SLOW(padding), grad_input);
}
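// NOTE: two different conversions appear in these shims. C10_AS_INTARRAYREF_SLOW
// materializes a c10::SymIntArrayRef into a concrete at::IntArrayRef and is used
// when the underlying wrapper is not SymInt-aware (e.g. the reflection_pad1d/3d
// calls above), while c10::fromIntArrayRefSlow goes the other way, presenting
// plain ints as SymInts for wrappers that take c10::SymIntArrayRef (e.g. the
// reflection_pad2d calls above). As the _SLOW/Slow names suggest, both take a
// per-element checking path before the conversion.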
at::Tensor replication_pad1d(const at::Tensor & self, at::IntArrayRef padding) {
return wrapper_CPU_replication_pad1d(self, padding);
}
at::Tensor replication_pad1d_symint(const at::Tensor & self, c10::SymIntArrayRef padding) {
return wrapper_CPU_replication_pad1d(self, C10_AS_INTARRAYREF_SLOW(padding));
}
at::Tensor & replication_pad1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) {
return wrapper_CPU_replication_pad1d_out_out(self, padding, out);
}
at::Tensor & replication_pad1d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) {
return wrapper_CPU_replication_pad1d_out_out(self, padding, out);
}
at::Tensor & replication_pad1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) {
return wrapper_CPU_replication_pad1d_out_out(self, C10_AS_INTARRAYREF_SLOW(padding), out);
}
at::Tensor & replication_pad1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
return wrapper_CPU_replication_pad1d_out_out(self, C10_AS_INTARRAYREF_SLOW(padding), out);
}
at::Tensor replication_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
return wrapper_CPU_replication_pad1d_backward(grad_output, self, padding);
}
at::Tensor replication_pad1d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
return wrapper_CPU_replication_pad1d_backward(grad_output, self, C10_AS_INTARRAYREF_SLOW(padding));
}
at::Tensor & replication_pad1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
return wrapper_CPU_replication_pad1d_backward_out_grad_input(grad_output, self, padding, grad_input);
}
at::Tensor & replication_pad1d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
return wrapper_CPU_replication_pad1d_backward_out_grad_input(grad_output, self, padding, grad_input);
}
at::Tensor & replication_pad1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
return wrapper_CPU_replication_pad1d_backward_out_grad_input(grad_output, self, C10_AS_INTARRAYREF_SLOW(padding), grad_input);
}
at::Tensor & replication_pad1d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
return wrapper_CPU_replication_pad1d_backward_out_grad_input(grad_output, self, C10_AS_INTARRAYREF_SLOW(padding), grad_input);
}
at::Tensor replication_pad2d(const at::Tensor & self, at::IntArrayRef padding) {
return wrapper_CPU_replication_pad2d(self, padding);
}
at::Tensor replication_pad2d_symint(const at::Tensor & self, c10::SymIntArrayRef padding) {
return wrapper_CPU_replication_pad2d(self, C10_AS_INTARRAYREF_SLOW(padding));
}
at::Tensor & replication_pad2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) {
return wrapper_CPU_replication_pad2d_out_out(self, padding, out);
}
at::Tensor & replication_pad2d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) {
return wrapper_CPU_replication_pad2d_out_out(self, padding, out);
}
at::Tensor & replication_pad2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) {
return wrapper_CPU_replication_pad2d_out_out(self, C10_AS_INTARRAYREF_SLOW(padding), out);
}
at::Tensor & replication_pad2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
return wrapper_CPU_replication_pad2d_out_out(self, C10_AS_INTARRAYREF_SLOW(padding), out);
}
at::Tensor replication_pad2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
return wrapper_CPU__replication_pad2d_backward(grad_output, self, c10::fromIntArrayRefSlow(padding));
}
at::Tensor replication_pad2d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
return wrapper_CPU__replication_pad2d_backward(grad_output, self, padding);
}
at::Tensor & replication_pad2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
return wrapper_CPU_grad_input_replication_pad2d_backward_out(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
}
at::Tensor & replication_pad2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
return wrapper_CPU_grad_input_replication_pad2d_backward_out(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
}
at::Tensor & replication_pad2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
return wrapper_CPU_grad_input_replication_pad2d_backward_out(grad_output, self, padding, grad_input);
}
at::Tensor & replication_pad2d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
return wrapper_CPU_grad_input_replication_pad2d_backward_out(grad_output, self, padding, grad_input);
}
at::Tensor replication_pad3d(const at::Tensor & self, at::IntArrayRef padding) {
return wrapper_CPU_replication_pad3d(self, padding);
}
at::Tensor replication_pad3d_symint(const at::Tensor & self, c10::SymIntArrayRef padding) {
return wrapper_CPU_replication_pad3d(self, C10_AS_INTARRAYREF_SLOW(padding));
}
at::Tensor & replication_pad3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) {
return wrapper_CPU_replication_pad3d_out_out(self, padding, out);
}
at::Tensor & replication_pad3d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) {
return wrapper_CPU_replication_pad3d_out_out(self, padding, out);
}
at::Tensor & replication_pad3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) {
return wrapper_CPU_replication_pad3d_out_out(self, C10_AS_INTARRAYREF_SLOW(padding), out);
}
at::Tensor & replication_pad3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
return wrapper_CPU_replication_pad3d_out_out(self, C10_AS_INTARRAYREF_SLOW(padding), out);
}
at::Tensor replication_pad3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
return wrapper_CPU__replication_pad3d_backward(grad_output, self, c10::fromIntArrayRefSlow(padding));
}
at::Tensor replication_pad3d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
return wrapper_CPU__replication_pad3d_backward(grad_output, self, padding);
}
at::Tensor & replication_pad3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
return wrapper_CPU_grad_input_replication_pad3d_backward_out(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
}
at::Tensor & replication_pad3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
return wrapper_CPU_grad_input_replication_pad3d_backward_out(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
}
at::Tensor & replication_pad3d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
return wrapper_CPU_grad_input_replication_pad3d_backward_out(grad_output, self, padding, grad_input);
}
at::Tensor & replication_pad3d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
return wrapper_CPU_grad_input_replication_pad3d_backward_out(grad_output, self, padding, grad_input);
}
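// NOTE: the upsample shims below follow the same pattern as the padding shims
// above: `align_corners` and the optional scale overrides (`scales`, `scales_d`,
// `scales_h`, `scales_w`) are forwarded unchanged, and only `output_size` /
// `input_size` are converted for the `_symint` variants.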
at::Tensor upsample_linear1d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales) {
return wrapper_CPU_upsample_linear1d(self, output_size, align_corners, scales);
}
at::Tensor upsample_linear1d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales) {
return wrapper_CPU_upsample_linear1d(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales);
}
at::Tensor & upsample_linear1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales) {
return wrapper_CPU_upsample_linear1d_out_out(self, output_size, align_corners, scales, out);
}
at::Tensor & upsample_linear1d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales, at::Tensor & out) {
return wrapper_CPU_upsample_linear1d_out_out(self, output_size, align_corners, scales, out);
}
at::Tensor & upsample_linear1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales) {
return wrapper_CPU_upsample_linear1d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales, out);
}
at::Tensor & upsample_linear1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales, at::Tensor & out) {
return wrapper_CPU_upsample_linear1d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales, out);
}
at::Tensor upsample_linear1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales) {
return wrapper_CPU_upsample_linear1d_backward(grad_output, output_size, input_size, align_corners, scales);
}
at::Tensor upsample_linear1d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales) {
return wrapper_CPU_upsample_linear1d_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales);
}
at::Tensor & upsample_linear1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales) {
return wrapper_CPU_upsample_linear1d_backward_out_grad_input(grad_output, output_size, input_size, align_corners, scales, grad_input);
}
at::Tensor & upsample_linear1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales, at::Tensor & grad_input) {
return wrapper_CPU_upsample_linear1d_backward_out_grad_input(grad_output, output_size, input_size, align_corners, scales, grad_input);
}
at::Tensor & upsample_linear1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales) {
return wrapper_CPU_upsample_linear1d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales, grad_input);
}
at::Tensor & upsample_linear1d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales, at::Tensor & grad_input) {
return wrapper_CPU_upsample_linear1d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales, grad_input);
}
at::Tensor upsample_bilinear2d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_bilinear2d(self, output_size, align_corners, scales_h, scales_w);
}
at::Tensor upsample_bilinear2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_bilinear2d(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_h, scales_w);
}
at::Tensor & upsample_bilinear2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_bilinear2d_out_out(self, output_size, align_corners, scales_h, scales_w, out);
}
at::Tensor & upsample_bilinear2d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU_upsample_bilinear2d_out_out(self, output_size, align_corners, scales_h, scales_w, out);
}
at::Tensor & upsample_bilinear2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_bilinear2d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_h, scales_w, out);
}
at::Tensor & upsample_bilinear2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU_upsample_bilinear2d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_h, scales_w, out);
}
at::Tensor upsample_bilinear2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_bilinear2d_backward(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
}
at::Tensor upsample_bilinear2d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_bilinear2d_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_h, scales_w);
}
at::Tensor & upsample_bilinear2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_bilinear2d_backward_out_grad_input(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
}
at::Tensor & upsample_bilinear2d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
return wrapper_CPU_upsample_bilinear2d_backward_out_grad_input(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
}
at::Tensor & upsample_bilinear2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_bilinear2d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_h, scales_w, grad_input);
}
at::Tensor & upsample_bilinear2d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
return wrapper_CPU_upsample_bilinear2d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_h, scales_w, grad_input);
}
at::Tensor _upsample_bilinear2d_aa(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU__upsample_bilinear2d_aa(self, output_size, align_corners, scales_h, scales_w);
}
at::Tensor _upsample_bilinear2d_aa_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU__upsample_bilinear2d_aa(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_h, scales_w);
}
at::Tensor & _upsample_bilinear2d_aa_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU__upsample_bilinear2d_aa_out_out(self, output_size, align_corners, scales_h, scales_w, out);
}
at::Tensor & _upsample_bilinear2d_aa_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU__upsample_bilinear2d_aa_out_out(self, output_size, align_corners, scales_h, scales_w, out);
}
at::Tensor & _upsample_bilinear2d_aa_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU__upsample_bilinear2d_aa_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_h, scales_w, out);
}
at::Tensor & _upsample_bilinear2d_aa_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU__upsample_bilinear2d_aa_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_h, scales_w, out);
}
at::Tensor _upsample_bilinear2d_aa_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU__upsample_bilinear2d_aa_backward(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
}
at::Tensor _upsample_bilinear2d_aa_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU__upsample_bilinear2d_aa_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_h, scales_w);
}
at::Tensor & _upsample_bilinear2d_aa_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU__upsample_bilinear2d_aa_backward_out_grad_input(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
}
at::Tensor & _upsample_bilinear2d_aa_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
return wrapper_CPU__upsample_bilinear2d_aa_backward_out_grad_input(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
}
at::Tensor & _upsample_bilinear2d_aa_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU__upsample_bilinear2d_aa_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_h, scales_w, grad_input);
}
at::Tensor & _upsample_bilinear2d_aa_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
return wrapper_CPU__upsample_bilinear2d_aa_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_h, scales_w, grad_input);
}
at::Tensor upsample_bicubic2d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_bicubic2d(self, output_size, align_corners, scales_h, scales_w);
}
at::Tensor upsample_bicubic2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_bicubic2d(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_h, scales_w);
}
at::Tensor & upsample_bicubic2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_bicubic2d_out_out(self, output_size, align_corners, scales_h, scales_w, out);
}
at::Tensor & upsample_bicubic2d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU_upsample_bicubic2d_out_out(self, output_size, align_corners, scales_h, scales_w, out);
}
at::Tensor & upsample_bicubic2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_bicubic2d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_h, scales_w, out);
}
at::Tensor & upsample_bicubic2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU_upsample_bicubic2d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_h, scales_w, out);
}
at::Tensor upsample_bicubic2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_bicubic2d_backward(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
}
at::Tensor upsample_bicubic2d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_bicubic2d_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_h, scales_w);
}
at::Tensor & upsample_bicubic2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_bicubic2d_backward_out_grad_input(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
}
at::Tensor & upsample_bicubic2d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
return wrapper_CPU_upsample_bicubic2d_backward_out_grad_input(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
}
at::Tensor & upsample_bicubic2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_bicubic2d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_h, scales_w, grad_input);
}
at::Tensor & upsample_bicubic2d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
return wrapper_CPU_upsample_bicubic2d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_h, scales_w, grad_input);
}
at::Tensor _upsample_bicubic2d_aa(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU__upsample_bicubic2d_aa(self, output_size, align_corners, scales_h, scales_w);
}
at::Tensor _upsample_bicubic2d_aa_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU__upsample_bicubic2d_aa(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_h, scales_w);
}
at::Tensor & _upsample_bicubic2d_aa_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU__upsample_bicubic2d_aa_out_out(self, output_size, align_corners, scales_h, scales_w, out);
}
at::Tensor & _upsample_bicubic2d_aa_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU__upsample_bicubic2d_aa_out_out(self, output_size, align_corners, scales_h, scales_w, out);
}
at::Tensor & _upsample_bicubic2d_aa_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU__upsample_bicubic2d_aa_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_h, scales_w, out);
}
at::Tensor & _upsample_bicubic2d_aa_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU__upsample_bicubic2d_aa_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_h, scales_w, out);
}
at::Tensor _upsample_bicubic2d_aa_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU__upsample_bicubic2d_aa_backward(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
}
at::Tensor _upsample_bicubic2d_aa_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU__upsample_bicubic2d_aa_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_h, scales_w);
}
at::Tensor & _upsample_bicubic2d_aa_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU__upsample_bicubic2d_aa_backward_out_grad_input(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
}
at::Tensor & _upsample_bicubic2d_aa_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
return wrapper_CPU__upsample_bicubic2d_aa_backward_out_grad_input(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
}
at::Tensor & _upsample_bicubic2d_aa_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU__upsample_bicubic2d_aa_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_h, scales_w, grad_input);
}
at::Tensor & _upsample_bicubic2d_aa_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
return wrapper_CPU__upsample_bicubic2d_aa_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_h, scales_w, grad_input);
}
at::Tensor upsample_trilinear3d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_trilinear3d(self, output_size, align_corners, scales_d, scales_h, scales_w);
}
at::Tensor upsample_trilinear3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_trilinear3d(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_d, scales_h, scales_w);
}
at::Tensor & upsample_trilinear3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_trilinear3d_out_out(self, output_size, align_corners, scales_d, scales_h, scales_w, out);
}
at::Tensor & upsample_trilinear3d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU_upsample_trilinear3d_out_out(self, output_size, align_corners, scales_d, scales_h, scales_w, out);
}
at::Tensor & upsample_trilinear3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_trilinear3d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_d, scales_h, scales_w, out);
}
at::Tensor & upsample_trilinear3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU_upsample_trilinear3d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), align_corners, scales_d, scales_h, scales_w, out);
}
at::Tensor upsample_trilinear3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_trilinear3d_backward(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w);
}
at::Tensor upsample_trilinear3d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_trilinear3d_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_d, scales_h, scales_w);
}
at::Tensor & upsample_trilinear3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_trilinear3d_backward_out_grad_input(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w, grad_input);
}
at::Tensor & upsample_trilinear3d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
return wrapper_CPU_upsample_trilinear3d_backward_out_grad_input(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w, grad_input);
}
at::Tensor & upsample_trilinear3d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_trilinear3d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_d, scales_h, scales_w, grad_input);
}
at::Tensor & upsample_trilinear3d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
return wrapper_CPU_upsample_trilinear3d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), align_corners, scales_d, scales_h, scales_w, grad_input);
}
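// A minimal usage sketch for the nearest-neighbor family below (hypothetical
// caller code, not part of this generated file). Passing c10::nullopt for the
// scale arguments lets the kernel derive the scale factors from output_size:
//
//   at::Tensor img = at::rand({1, 3, 4, 4});
//   at::Tensor up = at::cpu::upsample_nearest2d(img, {8, 8}, c10::nullopt, c10::nullopt);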
at::Tensor upsample_nearest1d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales) {
return wrapper_CPU_upsample_nearest1d(self, output_size, scales);
}
at::Tensor upsample_nearest1d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales) {
return wrapper_CPU_upsample_nearest1d(self, C10_AS_INTARRAYREF_SLOW(output_size), scales);
}
at::Tensor & upsample_nearest1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales) {
return wrapper_CPU_upsample_nearest1d_out_out(self, output_size, scales, out);
}
at::Tensor & upsample_nearest1d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales, at::Tensor & out) {
return wrapper_CPU_upsample_nearest1d_out_out(self, output_size, scales, out);
}
at::Tensor & upsample_nearest1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales) {
return wrapper_CPU_upsample_nearest1d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), scales, out);
}
at::Tensor & upsample_nearest1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales, at::Tensor & out) {
return wrapper_CPU_upsample_nearest1d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), scales, out);
}
at::Tensor _upsample_nearest_exact1d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales) {
return wrapper_CPU__upsample_nearest_exact1d(self, output_size, scales);
}
at::Tensor _upsample_nearest_exact1d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales) {
return wrapper_CPU__upsample_nearest_exact1d(self, C10_AS_INTARRAYREF_SLOW(output_size), scales);
}
at::Tensor & _upsample_nearest_exact1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales) {
return wrapper_CPU__upsample_nearest_exact1d_out_out(self, output_size, scales, out);
}
at::Tensor & _upsample_nearest_exact1d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales, at::Tensor & out) {
return wrapper_CPU__upsample_nearest_exact1d_out_out(self, output_size, scales, out);
}
at::Tensor & _upsample_nearest_exact1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales) {
return wrapper_CPU__upsample_nearest_exact1d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), scales, out);
}
at::Tensor & _upsample_nearest_exact1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales, at::Tensor & out) {
return wrapper_CPU__upsample_nearest_exact1d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), scales, out);
}
at::Tensor upsample_nearest1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales) {
return wrapper_CPU_upsample_nearest1d_backward(grad_output, output_size, input_size, scales);
}
at::Tensor upsample_nearest1d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales) {
return wrapper_CPU_upsample_nearest1d_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales);
}
at::Tensor & upsample_nearest1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales) {
return wrapper_CPU_upsample_nearest1d_backward_out_grad_input(grad_output, output_size, input_size, scales, grad_input);
}
at::Tensor & upsample_nearest1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input) {
return wrapper_CPU_upsample_nearest1d_backward_out_grad_input(grad_output, output_size, input_size, scales, grad_input);
}
at::Tensor & upsample_nearest1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales) {
return wrapper_CPU_upsample_nearest1d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales, grad_input);
}
at::Tensor & upsample_nearest1d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input) {
return wrapper_CPU_upsample_nearest1d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales, grad_input);
}
at::Tensor _upsample_nearest_exact1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales) {
return wrapper_CPU__upsample_nearest_exact1d_backward(grad_output, output_size, input_size, scales);
}
at::Tensor _upsample_nearest_exact1d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales) {
return wrapper_CPU__upsample_nearest_exact1d_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales);
}
at::Tensor & _upsample_nearest_exact1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales) {
return wrapper_CPU__upsample_nearest_exact1d_backward_out_grad_input(grad_output, output_size, input_size, scales, grad_input);
}
at::Tensor & _upsample_nearest_exact1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input) {
return wrapper_CPU__upsample_nearest_exact1d_backward_out_grad_input(grad_output, output_size, input_size, scales, grad_input);
}
at::Tensor & _upsample_nearest_exact1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales) {
return wrapper_CPU__upsample_nearest_exact1d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales, grad_input);
}
at::Tensor & _upsample_nearest_exact1d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input) {
return wrapper_CPU__upsample_nearest_exact1d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales, grad_input);
}
at::Tensor upsample_nearest2d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_nearest2d(self, output_size, scales_h, scales_w);
}
at::Tensor upsample_nearest2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_nearest2d(self, C10_AS_INTARRAYREF_SLOW(output_size), scales_h, scales_w);
}
at::Tensor & upsample_nearest2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_nearest2d_out_out(self, output_size, scales_h, scales_w, out);
}
at::Tensor & upsample_nearest2d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU_upsample_nearest2d_out_out(self, output_size, scales_h, scales_w, out);
}
at::Tensor & upsample_nearest2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_nearest2d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), scales_h, scales_w, out);
}
at::Tensor & upsample_nearest2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU_upsample_nearest2d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), scales_h, scales_w, out);
}
at::Tensor _upsample_nearest_exact2d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU__upsample_nearest_exact2d(self, output_size, scales_h, scales_w);
}
at::Tensor _upsample_nearest_exact2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU__upsample_nearest_exact2d(self, C10_AS_INTARRAYREF_SLOW(output_size), scales_h, scales_w);
}
at::Tensor & _upsample_nearest_exact2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU__upsample_nearest_exact2d_out_out(self, output_size, scales_h, scales_w, out);
}
at::Tensor & _upsample_nearest_exact2d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU__upsample_nearest_exact2d_out_out(self, output_size, scales_h, scales_w, out);
}
at::Tensor & _upsample_nearest_exact2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU__upsample_nearest_exact2d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), scales_h, scales_w, out);
}
at::Tensor & _upsample_nearest_exact2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU__upsample_nearest_exact2d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), scales_h, scales_w, out);
}
at::Tensor upsample_nearest2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_nearest2d_backward(grad_output, output_size, input_size, scales_h, scales_w);
}
at::Tensor upsample_nearest2d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_nearest2d_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales_h, scales_w);
}
at::Tensor & upsample_nearest2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_nearest2d_backward_out_grad_input(grad_output, output_size, input_size, scales_h, scales_w, grad_input);
}
at::Tensor & upsample_nearest2d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
return wrapper_CPU_upsample_nearest2d_backward_out_grad_input(grad_output, output_size, input_size, scales_h, scales_w, grad_input);
}
at::Tensor & upsample_nearest2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_nearest2d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales_h, scales_w, grad_input);
}
at::Tensor & upsample_nearest2d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
return wrapper_CPU_upsample_nearest2d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales_h, scales_w, grad_input);
}
at::Tensor _upsample_nearest_exact2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU__upsample_nearest_exact2d_backward(grad_output, output_size, input_size, scales_h, scales_w);
}
at::Tensor _upsample_nearest_exact2d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU__upsample_nearest_exact2d_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales_h, scales_w);
}
at::Tensor & _upsample_nearest_exact2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU__upsample_nearest_exact2d_backward_out_grad_input(grad_output, output_size, input_size, scales_h, scales_w, grad_input);
}
at::Tensor & _upsample_nearest_exact2d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
return wrapper_CPU__upsample_nearest_exact2d_backward_out_grad_input(grad_output, output_size, input_size, scales_h, scales_w, grad_input);
}
at::Tensor & _upsample_nearest_exact2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU__upsample_nearest_exact2d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales_h, scales_w, grad_input);
}
at::Tensor & _upsample_nearest_exact2d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
return wrapper_CPU__upsample_nearest_exact2d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales_h, scales_w, grad_input);
}
at::Tensor upsample_nearest3d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_nearest3d(self, output_size, scales_d, scales_h, scales_w);
}
at::Tensor upsample_nearest3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_nearest3d(self, C10_AS_INTARRAYREF_SLOW(output_size), scales_d, scales_h, scales_w);
}
at::Tensor & upsample_nearest3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_nearest3d_out_out(self, output_size, scales_d, scales_h, scales_w, out);
}
at::Tensor & upsample_nearest3d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU_upsample_nearest3d_out_out(self, output_size, scales_d, scales_h, scales_w, out);
}
at::Tensor & upsample_nearest3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_nearest3d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), scales_d, scales_h, scales_w, out);
}
at::Tensor & upsample_nearest3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU_upsample_nearest3d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), scales_d, scales_h, scales_w, out);
}
at::Tensor _upsample_nearest_exact3d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU__upsample_nearest_exact3d(self, output_size, scales_d, scales_h, scales_w);
}
at::Tensor _upsample_nearest_exact3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU__upsample_nearest_exact3d(self, C10_AS_INTARRAYREF_SLOW(output_size), scales_d, scales_h, scales_w);
}
at::Tensor & _upsample_nearest_exact3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU__upsample_nearest_exact3d_out_out(self, output_size, scales_d, scales_h, scales_w, out);
}
at::Tensor & _upsample_nearest_exact3d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU__upsample_nearest_exact3d_out_out(self, output_size, scales_d, scales_h, scales_w, out);
}
at::Tensor & _upsample_nearest_exact3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU__upsample_nearest_exact3d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), scales_d, scales_h, scales_w, out);
}
at::Tensor & _upsample_nearest_exact3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
return wrapper_CPU__upsample_nearest_exact3d_out_out(self, C10_AS_INTARRAYREF_SLOW(output_size), scales_d, scales_h, scales_w, out);
}
at::Tensor upsample_nearest3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_nearest3d_backward(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
}
at::Tensor upsample_nearest3d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_nearest3d_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales_d, scales_h, scales_w);
}
at::Tensor & upsample_nearest3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_nearest3d_backward_out_grad_input(grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
}
at::Tensor & upsample_nearest3d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
return wrapper_CPU_upsample_nearest3d_backward_out_grad_input(grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
}
at::Tensor & upsample_nearest3d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
return wrapper_CPU_upsample_nearest3d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales_d, scales_h, scales_w, grad_input);
37067}
37068at::Tensor & upsample_nearest3d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
37069return wrapper_CPU_upsample_nearest3d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales_d, scales_h, scales_w, grad_input);
37070}
37071at::Tensor _upsample_nearest_exact3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
37072return wrapper_CPU__upsample_nearest_exact3d_backward(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
37073}
37074at::Tensor _upsample_nearest_exact3d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
37075return wrapper_CPU__upsample_nearest_exact3d_backward(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales_d, scales_h, scales_w);
37076}
37077at::Tensor & _upsample_nearest_exact3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
37078return wrapper_CPU__upsample_nearest_exact3d_backward_out_grad_input(grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
37079}
37080at::Tensor & _upsample_nearest_exact3d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
37081return wrapper_CPU__upsample_nearest_exact3d_backward_out_grad_input(grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
37082}
37083at::Tensor & _upsample_nearest_exact3d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
37084return wrapper_CPU__upsample_nearest_exact3d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales_d, scales_h, scales_w, grad_input);
37085}
37086at::Tensor & _upsample_nearest_exact3d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
37087return wrapper_CPU__upsample_nearest_exact3d_backward_out_grad_input(grad_output, C10_AS_INTARRAYREF_SLOW(output_size), C10_AS_INTARRAYREF_SLOW(input_size), scales_d, scales_h, scales_w, grad_input);
37088}
at::Tensor sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & output) {
return wrapper_CPU_sigmoid_backward(grad_output, output);
}
at::Tensor & sigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & output) {
return wrapper_CPU_sigmoid_backward_out_grad_input(grad_output, output, grad_input);
}
at::Tensor & sigmoid_backward_outf(const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input) {
return wrapper_CPU_sigmoid_backward_out_grad_input(grad_output, output, grad_input);
}
at::Tensor logit_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps) {
return wrapper_CPU_logit_backward(grad_output, self, eps);
}
at::Tensor & logit_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps) {
return wrapper_CPU_logit_backward_out_grad_input(grad_output, self, eps, grad_input);
}
at::Tensor & logit_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps, at::Tensor & grad_input) {
return wrapper_CPU_logit_backward_out_grad_input(grad_output, self, eps, grad_input);
}
at::Tensor tanh_backward(const at::Tensor & grad_output, const at::Tensor & output) {
return wrapper_CPU_tanh_backward(grad_output, output);
}
at::Tensor & tanh_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & output) {
return wrapper_CPU_tanh_backward_out_grad_input(grad_output, output, grad_input);
}
at::Tensor & tanh_backward_outf(const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input) {
return wrapper_CPU_tanh_backward_out_grad_input(grad_output, output, grad_input);
}
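// Illustrative usage (a minimal sketch, not emitted by torchgen and excluded
// from the build): the structured out-variant wrappers above write their
// result into the trailing Tensor& argument. The tensor values and the helper
// name below are placeholders introduced only for this sketch.
#if 0
static void tanh_backward_usage_sketch() {
  at::Tensor output = at::tanh(at::randn({4, 4}));
  at::Tensor grad_output = at::ones_like(output);
  at::Tensor grad_input = at::empty_like(output);
  // Writes (1 - tanh(x)^2) * grad_output into grad_input via the CPU kernel.
  tanh_backward_out(grad_input, grad_output, output);
}
#endif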
at::Tensor slow_conv_transpose2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation) {
return wrapper_CPU_slow_conv_transpose2d(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
}
at::Tensor slow_conv_transpose2d_symint(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef dilation) {
return wrapper_CPU_slow_conv_transpose2d(self, weight, kernel_size, bias, stride, C10_AS_INTARRAYREF_SLOW(padding), C10_AS_INTARRAYREF_SLOW(output_padding), dilation);
}
at::Tensor & slow_conv_transpose2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation) {
return wrapper_CPU_slow_conv_transpose2d_out_out(self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out);
}
at::Tensor & slow_conv_transpose2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out) {
return wrapper_CPU_slow_conv_transpose2d_out_out(self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out);
}
at::Tensor & slow_conv_transpose2d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef dilation) {
return wrapper_CPU_slow_conv_transpose2d_out_out(self, weight, kernel_size, bias, stride, C10_AS_INTARRAYREF_SLOW(padding), C10_AS_INTARRAYREF_SLOW(output_padding), dilation, out);
}
at::Tensor & slow_conv_transpose2d_symint_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out) {
return wrapper_CPU_slow_conv_transpose2d_out_out(self, weight, kernel_size, bias, stride, C10_AS_INTARRAYREF_SLOW(padding), C10_AS_INTARRAYREF_SLOW(output_padding), dilation, out);
}
at::Tensor slow_conv_transpose3d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation) {
return wrapper_CPU__slow_conv_transpose3d(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), dilation);
}
at::Tensor slow_conv_transpose3d_symint(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef dilation) {
return wrapper_CPU__slow_conv_transpose3d(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
}
at::Tensor & slow_conv_transpose3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation) {
return wrapper_CPU_out_slow_conv_transpose3d_out(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), dilation, out);
}
at::Tensor & slow_conv_transpose3d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out) {
return wrapper_CPU_out_slow_conv_transpose3d_out(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), dilation, out);
}
at::Tensor & slow_conv_transpose3d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef dilation) {
return wrapper_CPU_out_slow_conv_transpose3d_out(self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out);
}
at::Tensor & slow_conv_transpose3d_symint_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out) {
return wrapper_CPU_out_slow_conv_transpose3d_out(self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out);
}
at::Tensor _slow_conv2d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
return wrapper_CPU___slow_conv2d_forward(self, weight, kernel_size, bias, stride, padding);
}
at::Tensor & _slow_conv2d_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
return wrapper_CPU_output__slow_conv2d_forward_out(self, weight, kernel_size, bias, stride, padding, output);
}
at::Tensor & _slow_conv2d_forward_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & output) {
return wrapper_CPU_output__slow_conv2d_forward_out(self, weight, kernel_size, bias, stride, padding, output);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding) {
return wrapper_CPU_grad_input__slow_conv2d_backward_out(grad_output, self, weight, kernel_size, stride, padding, grad_input, grad_weight, grad_bias);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias) {
return wrapper_CPU_grad_input__slow_conv2d_backward_out(grad_output, self, weight, kernel_size, stride, padding, grad_input, grad_weight, grad_bias);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _slow_conv2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array<bool,3> output_mask) {
return wrapper_CPU_output_mask__slow_conv2d_backward(grad_output, self, weight, kernel_size, stride, padding, output_mask);
}
at::Tensor slow_conv3d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
return wrapper_CPU__slow_conv3d_forward(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding));
}
at::Tensor slow_conv3d_forward_symint(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding) {
return wrapper_CPU__slow_conv3d_forward(self, weight, kernel_size, bias, stride, padding);
}
at::Tensor & slow_conv3d_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
return wrapper_CPU_output_slow_conv3d_forward_out(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), output);
}
at::Tensor & slow_conv3d_forward_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & output) {
return wrapper_CPU_output_slow_conv3d_forward_out(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), output);
}
at::Tensor & slow_conv3d_forward_symint_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding) {
return wrapper_CPU_output_slow_conv3d_forward_out(self, weight, kernel_size, bias, stride, padding, output);
}
at::Tensor & slow_conv3d_forward_symint_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & output) {
return wrapper_CPU_output_slow_conv3d_forward_out(self, weight, kernel_size, bias, stride, padding, output);
}
at::Tensor slow_conv_dilated2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation) {
return wrapper_CPU__slow_conv_dilated2d(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation);
}
at::Tensor slow_conv_dilated2d_symint(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {
return wrapper_CPU__slow_conv_dilated2d(self, weight, kernel_size, bias, stride, padding, dilation);
}
at::Tensor slow_conv_dilated3d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation) {
return wrapper_CPU__slow_conv_dilated3d(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation);
}
at::Tensor slow_conv_dilated3d_symint(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {
return wrapper_CPU__slow_conv_dilated3d(self, weight, kernel_size, bias, stride, padding, dilation);
}
at::Tensor col2im(const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
return wrapper_CPU__col2im(self, c10::fromIntArrayRefSlow(output_size), kernel_size, dilation, padding, stride);
}
at::Tensor col2im_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
return wrapper_CPU__col2im(self, output_size, kernel_size, dilation, padding, stride);
}
at::Tensor & col2im_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
return wrapper_CPU_out_col2im_out(self, c10::fromIntArrayRefSlow(output_size), kernel_size, dilation, padding, stride, out);
}
at::Tensor & col2im_outf(const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
return wrapper_CPU_out_col2im_out(self, c10::fromIntArrayRefSlow(output_size), kernel_size, dilation, padding, stride, out);
}
at::Tensor & col2im_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
return wrapper_CPU_out_col2im_out(self, output_size, kernel_size, dilation, padding, stride, out);
}
at::Tensor & col2im_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
return wrapper_CPU_out_col2im_out(self, output_size, kernel_size, dilation, padding, stride, out);
}
at::Tensor im2col(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
return wrapper_CPU__im2col(self, kernel_size, dilation, padding, stride);
}
at::Tensor & im2col_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
return wrapper_CPU_out_im2col_out(self, kernel_size, dilation, padding, stride, out);
}
at::Tensor & im2col_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
return wrapper_CPU_out_im2col_out(self, kernel_size, dilation, padding, stride, out);
}
at::Tensor isposinf(const at::Tensor & self) {
return wrapper_CPU_isposinf(self);
}
at::Tensor & isposinf_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_isposinf_out_out(self, out);
}
at::Tensor & isposinf_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_isposinf_out_out(self, out);
}
at::Tensor isneginf(const at::Tensor & self) {
return wrapper_CPU_isneginf(self);
}
at::Tensor & isneginf_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_isneginf_out_out(self, out);
}
at::Tensor & isneginf_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_isneginf_out_out(self, out);
}
at::Tensor special_entr(const at::Tensor & self) {
return wrapper_CPU_special_entr(self);
}
at::Tensor & special_entr_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_special_entr_out_out(self, out);
}
at::Tensor & special_entr_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_special_entr_out_out(self, out);
}
at::Tensor special_ndtri(const at::Tensor & self) {
return wrapper_CPU_special_ndtri(self);
}
at::Tensor & special_ndtri_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_special_ndtri_out_out(self, out);
}
at::Tensor & special_ndtri_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_special_ndtri_out_out(self, out);
}
at::Tensor special_log_ndtr(const at::Tensor & self) {
return wrapper_CPU_special_log_ndtr(self);
}
at::Tensor & special_log_ndtr_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_special_log_ndtr_out_out(self, out);
}
at::Tensor & special_log_ndtr_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_special_log_ndtr_out_out(self, out);
}
at::Tensor special_erfcx(const at::Tensor & self) {
return wrapper_CPU_special_erfcx(self);
}
at::Tensor & special_erfcx_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_special_erfcx_out_out(self, out);
}
at::Tensor & special_erfcx_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_special_erfcx_out_out(self, out);
}
at::Tensor special_xlog1py(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_special_xlog1py(self, other);
}
at::Tensor & special_xlog1py_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_special_xlog1py_out_out(self, other, out);
}
at::Tensor & special_xlog1py_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_special_xlog1py_out_out(self, other, out);
}
at::Tensor special_zeta(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_special_zeta(self, other);
}
at::Tensor & special_zeta_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CPU_special_zeta_out_out(self, other, out);
}
at::Tensor & special_zeta_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CPU_special_zeta_out_out(self, other, out);
}
at::Tensor special_i0e(const at::Tensor & self) {
return wrapper_CPU_special_i0e(self);
}
at::Tensor & special_i0e_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_special_i0e_out_out(self, out);
}
at::Tensor & special_i0e_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_special_i0e_out_out(self, out);
}
at::Tensor special_i1(const at::Tensor & self) {
return wrapper_CPU_special_i1(self);
}
at::Tensor & special_i1_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_special_i1_out_out(self, out);
}
at::Tensor & special_i1_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_special_i1_out_out(self, out);
}
at::Tensor special_i1e(const at::Tensor & self) {
return wrapper_CPU_special_i1e(self);
}
at::Tensor & special_i1e_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_special_i1e_out_out(self, out);
}
at::Tensor & special_i1e_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_special_i1e_out_out(self, out);
}
::std::tuple<at::Tensor,at::Tensor> linalg_cholesky_ex(const at::Tensor & self, bool upper, bool check_errors) {
return wrapper_CPU_linalg_cholesky_ex(self, upper, check_errors);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_cholesky_ex_out(at::Tensor & L, at::Tensor & info, const at::Tensor & self, bool upper, bool check_errors) {
return wrapper_CPU_linalg_cholesky_ex_out_L(self, upper, check_errors, L, info);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_cholesky_ex_outf(const at::Tensor & self, bool upper, bool check_errors, at::Tensor & L, at::Tensor & info) {
return wrapper_CPU_linalg_cholesky_ex_out_L(self, upper, check_errors, L, info);
}
at::Tensor linalg_cross(const at::Tensor & self, const at::Tensor & other, int64_t dim) {
return wrapper_CPU_linalg_cross(self, other, dim);
}
at::Tensor & linalg_cross_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, int64_t dim) {
return wrapper_CPU_linalg_cross_out_out(self, other, dim, out);
}
at::Tensor & linalg_cross_outf(const at::Tensor & self, const at::Tensor & other, int64_t dim, at::Tensor & out) {
return wrapper_CPU_linalg_cross_out_out(self, other, dim, out);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu_factor_ex(const at::Tensor & A, bool pivot, bool check_errors) {
return wrapper_CPU_linalg_lu_factor_ex(A, pivot, check_errors);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_factor_ex_out(at::Tensor & LU, at::Tensor & pivots, at::Tensor & info, const at::Tensor & A, bool pivot, bool check_errors) {
return wrapper_CPU_linalg_lu_factor_ex_out_out(A, pivot, check_errors, LU, pivots, info);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_factor_ex_outf(const at::Tensor & A, bool pivot, bool check_errors, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info) {
return wrapper_CPU_linalg_lu_factor_ex_out_out(A, pivot, check_errors, LU, pivots, info);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu(const at::Tensor & A, bool pivot) {
return wrapper_CPU_linalg_lu(A, pivot);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_out(at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & A, bool pivot) {
return wrapper_CPU_linalg_lu_out_out(A, pivot, P, L, U);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_outf(const at::Tensor & A, bool pivot, at::Tensor & P, at::Tensor & L, at::Tensor & U) {
return wrapper_CPU_linalg_lu_out_out(A, pivot, P, L, U);
}
at::Tensor linalg_lu_solve(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint) {
return wrapper_CPU_linalg_lu_solve(LU, pivots, B, left, adjoint);
}
at::Tensor & linalg_lu_solve_out(at::Tensor & out, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint) {
return wrapper_CPU_linalg_lu_solve_out_out(LU, pivots, B, left, adjoint, out);
}
at::Tensor & linalg_lu_solve_outf(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint, at::Tensor & out) {
return wrapper_CPU_linalg_lu_solve_out_out(LU, pivots, B, left, adjoint, out);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_det(const at::Tensor & A) {
return wrapper_CPU__linalg_det(A);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_det_out(at::Tensor & result, at::Tensor & LU, at::Tensor & pivots, const at::Tensor & A) {
return wrapper_CPU__linalg_det_out_result(A, result, LU, pivots);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_det_outf(const at::Tensor & A, at::Tensor & result, at::Tensor & LU, at::Tensor & pivots) {
return wrapper_CPU__linalg_det_out_result(A, result, LU, pivots);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_ldl_factor_ex(const at::Tensor & self, bool hermitian, bool check_errors) {
return wrapper_CPU_linalg_ldl_factor_ex(self, hermitian, check_errors);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_ldl_factor_ex_out(at::Tensor & LD, at::Tensor & pivots, at::Tensor & info, const at::Tensor & self, bool hermitian, bool check_errors) {
return wrapper_CPU_linalg_ldl_factor_ex_out_out(self, hermitian, check_errors, LD, pivots, info);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_ldl_factor_ex_outf(const at::Tensor & self, bool hermitian, bool check_errors, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info) {
return wrapper_CPU_linalg_ldl_factor_ex_out_out(self, hermitian, check_errors, LD, pivots, info);
}
at::Tensor linalg_ldl_solve(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian) {
return wrapper_CPU_linalg_ldl_solve(LD, pivots, B, hermitian);
}
at::Tensor & linalg_ldl_solve_out(at::Tensor & out, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian) {
return wrapper_CPU_linalg_ldl_solve_out_out(LD, pivots, B, hermitian, out);
}
at::Tensor & linalg_ldl_solve_outf(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out) {
return wrapper_CPU_linalg_ldl_solve_out_out(LD, pivots, B, hermitian, out);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> linalg_lstsq_out(at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values, const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond, c10::optional<c10::string_view> driver) {
return wrapper_CPU_out_linalg_lstsq_out(self, b, rcond, driver, solution, residuals, rank, singular_values);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> linalg_lstsq_outf(const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond, c10::optional<c10::string_view> driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values) {
return wrapper_CPU_out_linalg_lstsq_out(self, b, rcond, driver, solution, residuals, rank, singular_values);
}
at::Tensor linalg_matrix_exp(const at::Tensor & self) {
return wrapper_CPU__linalg_matrix_exp(self);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_slogdet(const at::Tensor & A) {
return wrapper_CPU__linalg_slogdet(A);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_slogdet_out(at::Tensor & sign, at::Tensor & logabsdet, at::Tensor & LU, at::Tensor & pivots, const at::Tensor & A) {
return wrapper_CPU__linalg_slogdet_out_sign(A, sign, logabsdet, LU, pivots);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_slogdet_outf(const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet, at::Tensor & LU, at::Tensor & pivots) {
return wrapper_CPU__linalg_slogdet_out_sign(A, sign, logabsdet, LU, pivots);
}
::std::tuple<at::Tensor,at::Tensor> linalg_eig(const at::Tensor & self) {
return wrapper_CPU__linalg_eig(self);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_out(at::Tensor & eigenvalues, at::Tensor & eigenvectors, const at::Tensor & self) {
return wrapper_CPU_out_linalg_eig_out(self, eigenvalues, eigenvectors);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_outf(const at::Tensor & self, at::Tensor & eigenvalues, at::Tensor & eigenvectors) {
return wrapper_CPU_out_linalg_eig_out(self, eigenvalues, eigenvectors);
}
::std::tuple<at::Tensor,at::Tensor> _linalg_eigh(const at::Tensor & A, c10::string_view UPLO, bool compute_v) {
return wrapper_CPU__linalg_eigh(A, UPLO, compute_v);
}
::std::tuple<at::Tensor &,at::Tensor &> _linalg_eigh_out(at::Tensor & eigenvalues, at::Tensor & eigenvectors, const at::Tensor & A, c10::string_view UPLO, bool compute_v) {
return wrapper_CPU__linalg_eigh_out_eigenvalues(A, UPLO, compute_v, eigenvalues, eigenvectors);
}
::std::tuple<at::Tensor &,at::Tensor &> _linalg_eigh_outf(const at::Tensor & A, c10::string_view UPLO, bool compute_v, at::Tensor & eigenvalues, at::Tensor & eigenvectors) {
return wrapper_CPU__linalg_eigh_out_eigenvalues(A, UPLO, compute_v, eigenvalues, eigenvectors);
}
at::Tensor linalg_householder_product(const at::Tensor & input, const at::Tensor & tau) {
return wrapper_CPU__linalg_householder_product(input, tau);
}
at::Tensor & linalg_householder_product_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & tau) {
return wrapper_CPU_out_linalg_householder_product_out(input, tau, out);
}
at::Tensor & linalg_householder_product_outf(const at::Tensor & input, const at::Tensor & tau, at::Tensor & out) {
return wrapper_CPU_out_linalg_householder_product_out(input, tau, out);
}
::std::tuple<at::Tensor,at::Tensor> linalg_inv_ex(const at::Tensor & A, bool check_errors) {
return wrapper_CPU_linalg_inv_ex(A, check_errors);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_inv_ex_out(at::Tensor & inverse, at::Tensor & info, const at::Tensor & A, bool check_errors) {
return wrapper_CPU_linalg_inv_ex_out_inverse(A, check_errors, inverse, info);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_inv_ex_outf(const at::Tensor & A, bool check_errors, at::Tensor & inverse, at::Tensor & info) {
return wrapper_CPU_linalg_inv_ex_out_inverse(A, check_errors, inverse, info);
}
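// Illustrative usage (a minimal sketch, not emitted by torchgen and excluded
// from the build): the *_ex wrappers above return an extra `info` tensor with
// the LAPACK status (0 means success) instead of raising, unless check_errors
// is true. The tensor value and the helper name below are placeholders
// introduced only for this sketch.
#if 0
static void linalg_inv_ex_usage_sketch() {
  at::Tensor A = at::eye(3);
  // With check_errors=false the caller is expected to inspect `info` itself.
  auto result = linalg_inv_ex(A, /*check_errors=*/false);
  at::Tensor inverse = std::get<0>(result);
  at::Tensor info = std::get<1>(result);
}
#endif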
at::Tensor linalg_vector_norm(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
return wrapper_CPU_linalg_vector_norm(self, ord, dim, keepdim, dtype);
}
at::Tensor & linalg_vector_norm_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
return wrapper_CPU_linalg_vector_norm_out_out(self, ord, dim, keepdim, dtype, out);
}
at::Tensor & linalg_vector_norm_outf(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
return wrapper_CPU_linalg_vector_norm_out_out(self, ord, dim, keepdim, dtype, out);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_svd(const at::Tensor & A, bool full_matrices, bool compute_uv, c10::optional<c10::string_view> driver) {
return wrapper_CPU__linalg_svd(A, full_matrices, compute_uv, driver);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_svd_out(at::Tensor & U, at::Tensor & S, at::Tensor & Vh, const at::Tensor & A, bool full_matrices, bool compute_uv, c10::optional<c10::string_view> driver) {
return wrapper_CPU__linalg_svd_out_U(A, full_matrices, compute_uv, driver, U, S, Vh);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_svd_outf(const at::Tensor & A, bool full_matrices, bool compute_uv, c10::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh) {
return wrapper_CPU__linalg_svd_out_U(A, full_matrices, compute_uv, driver, U, S, Vh);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_solve_ex(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
return wrapper_CPU__linalg_solve_ex(A, B, left, check_errors);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_solve_ex_out(at::Tensor & result, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info, const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
return wrapper_CPU__linalg_solve_ex_out_result(A, B, left, check_errors, result, LU, pivots, info);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_solve_ex_outf(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info) {
return wrapper_CPU__linalg_solve_ex_out_result(A, B, left, check_errors, result, LU, pivots, info);
}
::std::tuple<at::Tensor,at::Tensor> linalg_qr(const at::Tensor & A, c10::string_view mode) {
return wrapper_CPU_linalg_qr(A, mode);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_qr_out(at::Tensor & Q, at::Tensor & R, const at::Tensor & A, c10::string_view mode) {
return wrapper_CPU_linalg_qr_out_out(A, mode, Q, R);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_qr_outf(const at::Tensor & A, c10::string_view mode, at::Tensor & Q, at::Tensor & R) {
return wrapper_CPU_linalg_qr_out_out(A, mode, Q, R);
}
at::Tensor _test_optional_intlist(const at::Tensor & values, at::OptionalIntArrayRef addends) {
return wrapper_CPU___test_optional_intlist(values, addends);
}
at::Tensor _test_optional_filled_intlist(const at::Tensor & values, at::OptionalIntArrayRef addends) {
return wrapper_CPU___test_optional_filled_intlist(values, addends);
}
at::Tensor _test_optional_floatlist(const at::Tensor & values, c10::optional<at::ArrayRef<double>> addends) {
return wrapper_CPU___test_optional_floatlist(values, addends);
}
at::Tensor segment_reduce(const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & indices, const c10::optional<at::Tensor> & offsets, int64_t axis, bool unsafe, const c10::optional<at::Scalar> & initial) {
return wrapper_CPU__segment_reduce(data, reduce, lengths, indices, offsets, axis, unsafe, initial);
}
at::Tensor _segment_reduce_backward(const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & offsets, int64_t axis, const c10::optional<at::Scalar> & initial) {
return wrapper_CPU___segment_reduce_backward(grad, output, data, reduce, lengths, offsets, axis, initial);
}
at::Tensor _transformer_encoder_layer_fwd(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, c10::optional<int64_t> mask_type) {
return wrapper_CPU___transformer_encoder_layer_fwd(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type);
}
::std::tuple<at::Tensor,at::Tensor> _native_multi_head_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, bool need_weights, bool average_attn_weights, c10::optional<int64_t> mask_type) {
return wrapper_CPU___native_multi_head_attention(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type);
}
int64_t _fused_sdp_choice(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal) {
return wrapper_CPU___fused_sdp_choice(query, key, value, attn_mask, dropout_p, is_causal);
}
at::Tensor special_airy_ai(const at::Tensor & x) {
return wrapper_CPU_special_airy_ai(x);
}
at::Tensor & special_airy_ai_out(at::Tensor & out, const at::Tensor & x) {
return wrapper_CPU_special_airy_ai_out_out(x, out);
}
at::Tensor & special_airy_ai_outf(const at::Tensor & x, at::Tensor & out) {
return wrapper_CPU_special_airy_ai_out_out(x, out);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _transformer_decoder_only_layer_fwd(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, const c10::optional<at::Tensor> & incr_key, const c10::optional<at::Tensor> & incr_value) {
return wrapper_CPU___transformer_decoder_only_layer_fwd(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, incr_key, incr_value);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _native_decoder_only_multi_head_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, const c10::optional<at::Tensor> & incr_key, const c10::optional<at::Tensor> & incr_value, bool need_weights, bool average_attn_weights) {
return wrapper_CPU___native_decoder_only_multi_head_attention(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, incr_key, incr_value, need_weights, average_attn_weights);
}
at::Tensor special_bessel_j0(const at::Tensor & self) {
return wrapper_CPU_special_bessel_j0(self);
}
at::Tensor & special_bessel_j0_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_special_bessel_j0_out_out(self, out);
}
at::Tensor & special_bessel_j0_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_special_bessel_j0_out_out(self, out);
}
at::Tensor special_bessel_j1(const at::Tensor & self) {
return wrapper_CPU_special_bessel_j1(self);
}
at::Tensor & special_bessel_j1_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_special_bessel_j1_out_out(self, out);
}
at::Tensor & special_bessel_j1_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_special_bessel_j1_out_out(self, out);
}
at::Tensor special_bessel_y0(const at::Tensor & self) {
return wrapper_CPU_special_bessel_y0(self);
}
at::Tensor & special_bessel_y0_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_special_bessel_y0_out_out(self, out);
}
at::Tensor & special_bessel_y0_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_special_bessel_y0_out_out(self, out);
}
at::Tensor special_bessel_y1(const at::Tensor & self) {
return wrapper_CPU_special_bessel_y1(self);
}
at::Tensor & special_bessel_y1_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_special_bessel_y1_out_out(self, out);
}
at::Tensor & special_bessel_y1_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_special_bessel_y1_out_out(self, out);
}
at::Tensor special_chebyshev_polynomial_t(const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_chebyshev_polynomial_t(x, n);
}
at::Tensor & special_chebyshev_polynomial_t_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_chebyshev_polynomial_t_out_out(x, n, out);
}
at::Tensor & special_chebyshev_polynomial_t_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
return wrapper_CPU_special_chebyshev_polynomial_t_out_out(x, n, out);
}
at::Tensor special_chebyshev_polynomial_u(const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_chebyshev_polynomial_u(x, n);
}
at::Tensor & special_chebyshev_polynomial_u_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_chebyshev_polynomial_u_out_out(x, n, out);
}
at::Tensor & special_chebyshev_polynomial_u_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
return wrapper_CPU_special_chebyshev_polynomial_u_out_out(x, n, out);
}
at::Tensor special_chebyshev_polynomial_v(const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_chebyshev_polynomial_v(x, n);
}
at::Tensor & special_chebyshev_polynomial_v_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_chebyshev_polynomial_v_out_out(x, n, out);
}
at::Tensor & special_chebyshev_polynomial_v_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
return wrapper_CPU_special_chebyshev_polynomial_v_out_out(x, n, out);
}
at::Tensor special_chebyshev_polynomial_w(const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_chebyshev_polynomial_w(x, n);
}
at::Tensor & special_chebyshev_polynomial_w_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_chebyshev_polynomial_w_out_out(x, n, out);
}
at::Tensor & special_chebyshev_polynomial_w_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
return wrapper_CPU_special_chebyshev_polynomial_w_out_out(x, n, out);
}
at::Tensor special_hermite_polynomial_h(const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_hermite_polynomial_h(x, n);
}
at::Tensor & special_hermite_polynomial_h_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_hermite_polynomial_h_out_out(x, n, out);
}
at::Tensor & special_hermite_polynomial_h_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
return wrapper_CPU_special_hermite_polynomial_h_out_out(x, n, out);
}
at::Tensor special_hermite_polynomial_he(const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_hermite_polynomial_he(x, n);
}
at::Tensor & special_hermite_polynomial_he_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_hermite_polynomial_he_out_out(x, n, out);
}
at::Tensor & special_hermite_polynomial_he_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
return wrapper_CPU_special_hermite_polynomial_he_out_out(x, n, out);
}
at::Tensor special_laguerre_polynomial_l(const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_laguerre_polynomial_l(x, n);
}
at::Tensor & special_laguerre_polynomial_l_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_laguerre_polynomial_l_out_out(x, n, out);
}
at::Tensor & special_laguerre_polynomial_l_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
return wrapper_CPU_special_laguerre_polynomial_l_out_out(x, n, out);
}
at::Tensor special_legendre_polynomial_p(const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_legendre_polynomial_p(x, n);
}
at::Tensor & special_legendre_polynomial_p_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_legendre_polynomial_p_out_out(x, n, out);
}
at::Tensor & special_legendre_polynomial_p_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
return wrapper_CPU_special_legendre_polynomial_p_out_out(x, n, out);
}
at::Tensor special_modified_bessel_i0(const at::Tensor & self) {
return wrapper_CPU_special_modified_bessel_i0(self);
}
at::Tensor & special_modified_bessel_i0_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_special_modified_bessel_i0_out_out(self, out);
}
at::Tensor & special_modified_bessel_i0_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_special_modified_bessel_i0_out_out(self, out);
}
at::Tensor special_modified_bessel_i1(const at::Tensor & self) {
return wrapper_CPU_special_modified_bessel_i1(self);
}
at::Tensor & special_modified_bessel_i1_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_special_modified_bessel_i1_out_out(self, out);
}
at::Tensor & special_modified_bessel_i1_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_special_modified_bessel_i1_out_out(self, out);
}
at::Tensor special_modified_bessel_k0(const at::Tensor & self) {
return wrapper_CPU_special_modified_bessel_k0(self);
}
at::Tensor & special_modified_bessel_k0_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_special_modified_bessel_k0_out_out(self, out);
}
at::Tensor & special_modified_bessel_k0_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_special_modified_bessel_k0_out_out(self, out);
}
at::Tensor special_modified_bessel_k1(const at::Tensor & self) {
return wrapper_CPU_special_modified_bessel_k1(self);
}
at::Tensor & special_modified_bessel_k1_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CPU_special_modified_bessel_k1_out_out(self, out);
}
at::Tensor & special_modified_bessel_k1_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CPU_special_modified_bessel_k1_out_out(self, out);
}
at::Tensor special_scaled_modified_bessel_k0(const at::Tensor & x) {
return wrapper_CPU_special_scaled_modified_bessel_k0(x);
}
at::Tensor & special_scaled_modified_bessel_k0_out(at::Tensor & out, const at::Tensor & x) {
return wrapper_CPU_special_scaled_modified_bessel_k0_out_out(x, out);
}
at::Tensor & special_scaled_modified_bessel_k0_outf(const at::Tensor & x, at::Tensor & out) {
return wrapper_CPU_special_scaled_modified_bessel_k0_out_out(x, out);
}
at::Tensor special_scaled_modified_bessel_k1(const at::Tensor & x) {
return wrapper_CPU_special_scaled_modified_bessel_k1(x);
}
at::Tensor & special_scaled_modified_bessel_k1_out(at::Tensor & out, const at::Tensor & x) {
return wrapper_CPU_special_scaled_modified_bessel_k1_out_out(x, out);
}
at::Tensor & special_scaled_modified_bessel_k1_outf(const at::Tensor & x, at::Tensor & out) {
return wrapper_CPU_special_scaled_modified_bessel_k1_out_out(x, out);
}
at::Tensor special_shifted_chebyshev_polynomial_t(const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_shifted_chebyshev_polynomial_t(x, n);
}
at::Tensor & special_shifted_chebyshev_polynomial_t_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_shifted_chebyshev_polynomial_t_out_out(x, n, out);
}
at::Tensor & special_shifted_chebyshev_polynomial_t_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
return wrapper_CPU_special_shifted_chebyshev_polynomial_t_out_out(x, n, out);
}
at::Tensor special_shifted_chebyshev_polynomial_u(const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_shifted_chebyshev_polynomial_u(x, n);
}
at::Tensor & special_shifted_chebyshev_polynomial_u_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_shifted_chebyshev_polynomial_u_out_out(x, n, out);
}
at::Tensor & special_shifted_chebyshev_polynomial_u_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
return wrapper_CPU_special_shifted_chebyshev_polynomial_u_out_out(x, n, out);
}
at::Tensor special_shifted_chebyshev_polynomial_v(const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_shifted_chebyshev_polynomial_v(x, n);
}
at::Tensor & special_shifted_chebyshev_polynomial_v_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_shifted_chebyshev_polynomial_v_out_out(x, n, out);
}
at::Tensor & special_shifted_chebyshev_polynomial_v_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
return wrapper_CPU_special_shifted_chebyshev_polynomial_v_out_out(x, n, out);
}
at::Tensor special_shifted_chebyshev_polynomial_w(const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_shifted_chebyshev_polynomial_w(x, n);
}
at::Tensor & special_shifted_chebyshev_polynomial_w_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
return wrapper_CPU_special_shifted_chebyshev_polynomial_w_out_out(x, n, out);
}
at::Tensor & special_shifted_chebyshev_polynomial_w_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
return wrapper_CPU_special_shifted_chebyshev_polynomial_w_out_out(x, n, out);
}
at::Tensor special_spherical_bessel_j0(const at::Tensor & x) {
return wrapper_CPU_special_spherical_bessel_j0(x);
}
at::Tensor & special_spherical_bessel_j0_out(at::Tensor & out, const at::Tensor & x) {
return wrapper_CPU_special_spherical_bessel_j0_out_out(x, out);
}
at::Tensor & special_spherical_bessel_j0_outf(const at::Tensor & x, at::Tensor & out) {
return wrapper_CPU_special_spherical_bessel_j0_out_out(x, out);
}
at::Tensor _foobar(const at::Tensor & self, bool arg1, bool arg2, bool arg3) {
return wrapper_CPU___foobar(self, arg1, arg2, arg3);
}
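// Illustrative usage (a minimal sketch, not emitted by torchgen and excluded
// from the build): the wrappers in this namespace expose the CPU kernels
// directly as at::cpu::*, whereas ordinary callers reach the same kernels
// through the dispatcher via at::*. The tensor value and the helper name
// below are placeholders introduced only for this sketch.
#if 0
static void cpu_dispatch_usage_sketch() {
  at::Tensor x = at::randn({3});
  at::Tensor via_dispatcher = at::isposinf(x);  // routed through the dispatcher
  at::Tensor direct = at::cpu::isposinf(x);     // calls the wrapper defined above
}
#endif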
} // namespace cpu
} // namespace at