// required for old g++ to compile PRId64 macros, see
// https://github.com/pytorch/pytorch/issues/3571
// for context
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
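// (Illustrative sketch only: with the guard above in place, the <cinttypes>
// format macros compile on old g++, e.g. `std::printf("%" PRId64 "\n", v);`
// for a hypothetical int64_t `v`; no such code appears in this file.)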

// An external backend might generate this file within its own code tree
// and check all the source files in that tree with clang-format,
// so disable clang-format here since the backend might use a different config.
// clang-format off

// NOTE: This condition is true for all PyTorch internal libraries; it
// just excludes external projects such as torch_xla, which
// re-use some of the PyTorch codegen machinery.
#if defined(CAFFE2_BUILD_MAIN_LIB) || \
    defined(TORCH_CUDA_BUILD_MAIN_LIB) || \
    defined(TORCH_HIP_BUILD_MAIN_LIB) || \
    defined(TORCH_CUDA_CU_BUILD_MAIN_LIB) || \
    defined(TORCH_CUDA_CPP_BUILD_MAIN_LIB)
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#endif
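// (Roughly speaking, defining this macro opts this translation unit out of
// the monolithic operator headers such as ATen/Functions.h, so only the
// fine-grained per-operator ATen/ops/*.h headers included below may be used,
// which keeps include graphs and incremental rebuilds small.)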

// @generated by torchgen/gen.py from RegisterDispatchKey.cpp

#include <c10/core/TensorImpl.h>
#include <c10/core/Allocator.h>
#include <ATen/DeviceGuard.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/WrapDimUtils.h>
#include <ATen/Dispatch.h>
#include <c10/util/ExclusivelyOwned.h>
#include <c10/util/Half.h>
#include <c10/core/UndefinedTensorImpl.h>
#include <c10/util/Optional.h>
#include <ATen/Tensor.h>
#include <ATen/native/Resize.h>

#include <cstddef>
#include <functional>
#include <memory>
#include <utility>

#include <ATen/Config.h>
#include <ATen/core/op_registration/adaption.h>
#include <torch/library.h>


#include <ATen/ops/as_strided_native.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_strided.h>
#include <ATen/ops/_copy_from_and_resize.h>
#include <ATen/ops/_copy_from.h>
#include <ATen/ops/_add_batch_dim_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_add_batch_dim_native.h>
#include <ATen/ops/_assert_tensor_metadata_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_assert_tensor_metadata_native.h>
#include <ATen/ops/_autocast_to_full_precision_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_autocast_to_full_precision_native.h>
#include <ATen/ops/_autocast_to_reduced_precision_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_autocast_to_reduced_precision_native.h>
#include <ATen/ops/_backward_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_backward_native.h>
#include <ATen/ops/_batch_norm_impl_index_backward_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_batch_norm_impl_index_backward_native.h>
#include <ATen/ops/_batch_norm_impl_index_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_batch_norm_impl_index_native.h>
#include <ATen/ops/_cast_Byte_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_cast_Byte_native.h>
#include <ATen/ops/_cast_Char_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_cast_Char_native.h>
#include <ATen/ops/_cast_Double_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_cast_Double_native.h>
#include <ATen/ops/_cast_Float_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_cast_Float_native.h>
#include <ATen/ops/_cast_Half_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_cast_Half_native.h>
#include <ATen/ops/_cast_Int_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_cast_Int_native.h>
#include <ATen/ops/_cast_Long_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_cast_Long_native.h>
#include <ATen/ops/_cast_Short_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_cast_Short_native.h>
#include <ATen/ops/_choose_qparams_per_tensor_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_choose_qparams_per_tensor_native.h>
#include <ATen/ops/_convolution_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_convolution_double_backward_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_convolution_double_backward_native.h>
#include <ATen/ops/_convolution_mode_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_convolution_mode_native.h>
#include <ATen/ops/_convolution_native.h>
#include <ATen/ops/_cufft_clear_plan_cache_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_cufft_clear_plan_cache_native.h>
#include <ATen/ops/_cufft_get_plan_cache_max_size_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_cufft_get_plan_cache_max_size_native.h>
#include <ATen/ops/_cufft_get_plan_cache_size_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_cufft_get_plan_cache_size_native.h>
#include <ATen/ops/_cufft_set_plan_cache_max_size_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_cufft_set_plan_cache_max_size_native.h>
#include <ATen/ops/_debug_has_internal_overlap_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_debug_has_internal_overlap_native.h>
#include <ATen/ops/_dim_arange_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_dim_arange_native.h>
#include <ATen/ops/_embedding_bag_backward_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_embedding_bag_backward_native.h>
#include <ATen/ops/_embedding_bag_sparse_backward_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_embedding_bag_sparse_backward_native.h>
#include <ATen/ops/_gather_sparse_backward_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_gather_sparse_backward_native.h>
#include <ATen/ops/_grid_sampler_2d_cpu_fallback_backward_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_grid_sampler_2d_cpu_fallback_backward_native.h>
#include <ATen/ops/_has_compatible_shallow_copy_type_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_has_compatible_shallow_copy_type_native.h>
#include <ATen/ops/_is_zerotensor_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_is_zerotensor_native.h>
#include <ATen/ops/_lu_with_info_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_lu_with_info_native.h>
#include <ATen/ops/_nnpack_available_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_nnpack_available_native.h>
#include <ATen/ops/_pack_padded_sequence_backward_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_pack_padded_sequence_backward_native.h>
#include <ATen/ops/_pad_circular_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_pad_circular_native.h>
#include <ATen/ops/_pad_enum_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_pad_enum_native.h>
#include <ATen/ops/_pad_packed_sequence_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_pad_packed_sequence_native.h>
#include <ATen/ops/_remove_batch_dim_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_remove_batch_dim_native.h>
#include <ATen/ops/_reshape_from_tensor_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_reshape_from_tensor_native.h>
#include <ATen/ops/_rowwise_prune_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_rowwise_prune_native.h>
#include <ATen/ops/_saturate_weight_to_fp16_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_saturate_weight_to_fp16_native.h>
#include <ATen/ops/_scaled_dot_product_attention_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_scaled_dot_product_attention_math_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_scaled_dot_product_attention_math_native.h>
#include <ATen/ops/_scaled_dot_product_attention_native.h>
#include <ATen/ops/_shape_as_tensor_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_shape_as_tensor_native.h>
#include <ATen/ops/_sobol_engine_draw_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_sobol_engine_draw_native.h>
#include <ATen/ops/_sobol_engine_ff_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_sobol_engine_ff_native.h>
#include <ATen/ops/_sobol_engine_initialize_state_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_sobol_engine_initialize_state_native.h>
#include <ATen/ops/_sobol_engine_scramble_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_sobol_engine_scramble_native.h>
#include <ATen/ops/_sparse_bsc_tensor_unsafe_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_sparse_bsc_tensor_unsafe_native.h>
#include <ATen/ops/_sparse_bsr_tensor_unsafe_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_sparse_bsr_tensor_unsafe_native.h>
#include <ATen/ops/_sparse_compressed_tensor_unsafe_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_sparse_compressed_tensor_unsafe_native.h>
#include <ATen/ops/_sparse_coo_tensor_unsafe_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_sparse_coo_tensor_unsafe_native.h>
#include <ATen/ops/_sparse_csc_tensor_unsafe_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_sparse_csc_tensor_unsafe_native.h>
#include <ATen/ops/_sparse_csr_tensor_unsafe_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_sparse_csr_tensor_unsafe_native.h>
#include <ATen/ops/_sparse_log_softmax_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_sparse_log_softmax_native.h>
#include <ATen/ops/_sparse_mm_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_sparse_mm_native.h>
#include <ATen/ops/_sparse_softmax_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_sparse_softmax_native.h>
#include <ATen/ops/_sparse_sum_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_sparse_sum_native.h>
#include <ATen/ops/_test_ambiguous_defaults_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_test_ambiguous_defaults_native.h>
#include <ATen/ops/_test_autograd_multiple_dispatch_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_test_autograd_multiple_dispatch_native.h>
#include <ATen/ops/_test_check_tensor_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_test_check_tensor_native.h>
#include <ATen/ops/_test_serialization_subcmul_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_test_serialization_subcmul_native.h>
#include <ATen/ops/_test_string_default_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_test_string_default_native.h>
#include <ATen/ops/_thnn_differentiable_gru_cell_backward_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_thnn_differentiable_gru_cell_backward_native.h>
#include <ATen/ops/_thnn_differentiable_lstm_cell_backward_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_thnn_differentiable_lstm_cell_backward_native.h>
#include <ATen/ops/_thnn_fused_lstm_cell_backward_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_thnn_fused_lstm_cell_backward_native.h>
#include <ATen/ops/_to_cpu_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_to_cpu_native.h>
#include <ATen/ops/_unpack_dual_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_unpack_dual_native.h>
#include <ATen/ops/_upsample_bicubic2d_aa_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_upsample_bicubic2d_aa_native.h>
#include <ATen/ops/_upsample_bilinear2d_aa_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_upsample_bilinear2d_aa_native.h>
#include <ATen/ops/_upsample_nearest_exact1d_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_upsample_nearest_exact1d_native.h>
#include <ATen/ops/_upsample_nearest_exact2d_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_upsample_nearest_exact2d_native.h>
#include <ATen/ops/_upsample_nearest_exact3d_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_upsample_nearest_exact3d_native.h>
#include <ATen/ops/_use_cudnn_rnn_flatten_weight_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_use_cudnn_rnn_flatten_weight_native.h>
#include <ATen/ops/_validate_sparse_bsc_tensor_args_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_validate_sparse_bsc_tensor_args_native.h>
#include <ATen/ops/_validate_sparse_bsr_tensor_args_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_validate_sparse_bsr_tensor_args_native.h>
#include <ATen/ops/_validate_sparse_compressed_tensor_args_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_validate_sparse_compressed_tensor_args_native.h>
#include <ATen/ops/_validate_sparse_coo_tensor_args_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_validate_sparse_coo_tensor_args_native.h>
#include <ATen/ops/_validate_sparse_csc_tensor_args_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_validate_sparse_csc_tensor_args_native.h>
#include <ATen/ops/_validate_sparse_csr_tensor_args_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_validate_sparse_csr_tensor_args_native.h>
#include <ATen/ops/_version_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_version_native.h>
#include <ATen/ops/_weight_norm_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_weight_norm_differentiable_backward_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/_weight_norm_differentiable_backward_native.h>
#include <ATen/ops/_weight_norm_native.h>
#include <ATen/ops/absolute_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/absolute_native.h>
#include <ATen/ops/adaptive_avg_pool1d_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/adaptive_avg_pool1d_native.h>
#include <ATen/ops/adaptive_avg_pool2d_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/adaptive_avg_pool2d_native.h>
#include <ATen/ops/adaptive_avg_pool3d_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/adaptive_avg_pool3d_native.h>
#include <ATen/ops/adaptive_max_pool1d_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/adaptive_max_pool1d_native.h>
#include <ATen/ops/adjoint_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/adjoint_native.h>
#include <ATen/ops/affine_grid_generator_backward_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/affine_grid_generator_backward_native.h>
#include <ATen/ops/align_as_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/align_as_native.h>
#include <ATen/ops/align_tensors_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/align_tensors_native.h>
#include <ATen/ops/align_to_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/align_to_native.h>
#include <ATen/ops/all_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/all_native.h>
#include <ATen/ops/alpha_dropout_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/alpha_dropout_native.h>
#include <ATen/ops/and_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/and_native.h>
#include <ATen/ops/any_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/any_native.h>
#include <ATen/ops/arccos_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/arccos_native.h>
#include <ATen/ops/arccosh_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/arccosh_native.h>
#include <ATen/ops/arcsin_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/arcsin_native.h>
#include <ATen/ops/arcsinh_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/arcsinh_native.h>
#include <ATen/ops/arctan2_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/arctan2_native.h>
#include <ATen/ops/arctan_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/arctan_native.h>
#include <ATen/ops/arctanh_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/arctanh_native.h>
#include <ATen/ops/argsort_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/argsort_native.h>
#include <ATen/ops/argwhere_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/argwhere_native.h>
#include <ATen/ops/atleast_1d_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/atleast_1d_native.h>
#include <ATen/ops/atleast_2d_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/atleast_2d_native.h>
#include <ATen/ops/atleast_3d_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/atleast_3d_native.h>
#include <ATen/ops/avg_pool1d_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/avg_pool1d_native.h>
#include <ATen/ops/batch_norm_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/batch_norm_native.h>
#include <ATen/ops/bilinear_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/bilinear_native.h>
#include <ATen/ops/bitwise_and_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/bitwise_and_native.h>
#include <ATen/ops/bitwise_or_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/bitwise_or_native.h>
#include <ATen/ops/bitwise_xor_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/bitwise_xor_native.h>
#include <ATen/ops/broadcast_tensors_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/broadcast_tensors_native.h>
#include <ATen/ops/broadcast_to_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/broadcast_to_native.h>
#include <ATen/ops/can_cast_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/can_cast_native.h>
#include <ATen/ops/cartesian_prod_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/cartesian_prod_native.h>
#include <ATen/ops/cat_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/cat_native.h>
#include <ATen/ops/cdist_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/cdist_native.h>
#include <ATen/ops/chain_matmul_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/chain_matmul_native.h>
#include <ATen/ops/chalf_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/chalf_native.h>
#include <ATen/ops/choose_qparams_optimized_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/choose_qparams_optimized_native.h>
#include <ATen/ops/chunk_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/chunk_native.h>
#include <ATen/ops/clip_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/clip_native.h>
#include <ATen/ops/coalesce_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/coalesce_native.h>
#include <ATen/ops/column_stack_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/column_stack_native.h>
#include <ATen/ops/combinations_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/combinations_native.h>
#include <ATen/ops/concat_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/concat_native.h>
#include <ATen/ops/concatenate_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/concatenate_native.h>
#include <ATen/ops/conj_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/conj_native.h>
#include <ATen/ops/conj_physical_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/conj_physical_native.h>
#include <ATen/ops/contiguous_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/contiguous_native.h>
#include <ATen/ops/conv1d_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/conv1d_native.h>
#include <ATen/ops/conv2d_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/conv2d_native.h>
#include <ATen/ops/conv3d_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/conv3d_native.h>
#include <ATen/ops/conv_tbc_backward_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/conv_tbc_backward_native.h>
#include <ATen/ops/conv_transpose1d_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/conv_transpose1d_native.h>
#include <ATen/ops/conv_transpose2d_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/conv_transpose2d_native.h>
#include <ATen/ops/conv_transpose3d_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/conv_transpose3d_native.h>
#include <ATen/ops/corrcoef_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/corrcoef_native.h>
#include <ATen/ops/cosine_embedding_loss_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/cosine_embedding_loss_native.h>
#include <ATen/ops/cosine_similarity_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/cosine_similarity_native.h>
#include <ATen/ops/cov_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/cov_native.h>
#include <ATen/ops/cross_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/cross_entropy_loss_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/cross_entropy_loss_native.h>
#include <ATen/ops/cross_native.h>
#include <ATen/ops/ctc_loss_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/ctc_loss_native.h>
#include <ATen/ops/cudnn_is_acceptable_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/cudnn_is_acceptable_native.h>
#include <ATen/ops/cummax_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/cummax_native.h>
#include <ATen/ops/cummaxmin_backward_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/cummaxmin_backward_native.h>
#include <ATen/ops/cummin_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/cummin_native.h>
#include <ATen/ops/cumprod_backward_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/cumprod_backward_native.h>
#include <ATen/ops/cumprod_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/cumprod_native.h>
#include <ATen/ops/cumsum_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/cumsum_native.h>
#include <ATen/ops/cumulative_trapezoid_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/cumulative_trapezoid_native.h>
#include <ATen/ops/data_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/data_native.h>
#include <ATen/ops/det_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/det_native.h>
#include <ATen/ops/diag_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/diag_native.h>
#include <ATen/ops/diagflat_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/diagflat_native.h>
#include <ATen/ops/diagonal_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/diagonal_native.h>
#include <ATen/ops/diff_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/diff_native.h>
#include <ATen/ops/divide_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/divide_native.h>
#include <ATen/ops/dropout_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/dropout_native.h>
#include <ATen/ops/dsplit_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/dsplit_native.h>
#include <ATen/ops/dstack_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/dstack_native.h>
#include <ATen/ops/einsum_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/einsum_native.h>
#include <ATen/ops/embedding_backward_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/embedding_backward_native.h>
#include <ATen/ops/embedding_bag_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/embedding_bag_native.h>
#include <ATen/ops/embedding_sparse_backward_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/embedding_sparse_backward_native.h>
#include <ATen/ops/empty_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/empty_native.h>
#include <ATen/ops/expand_as_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/expand_as_native.h>
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_backward_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_backward_native.h>
#include <ATen/ops/fake_quantize_per_channel_affine_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fake_quantize_per_channel_affine_native.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward_native.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_native.h>
#include <ATen/ops/fbgemm_linear_fp16_weight_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fbgemm_linear_fp16_weight_fp32_activation_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fbgemm_linear_fp16_weight_fp32_activation_native.h>
#include <ATen/ops/fbgemm_linear_fp16_weight_native.h>
#include <ATen/ops/fbgemm_linear_int8_weight_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fbgemm_linear_int8_weight_fp32_activation_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fbgemm_linear_int8_weight_fp32_activation_native.h>
#include <ATen/ops/fbgemm_linear_int8_weight_native.h>
#include <ATen/ops/fbgemm_linear_quantize_weight_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fbgemm_linear_quantize_weight_native.h>
#include <ATen/ops/fbgemm_pack_gemm_matrix_fp16_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fbgemm_pack_gemm_matrix_fp16_native.h>
#include <ATen/ops/fbgemm_pack_quantized_matrix_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fbgemm_pack_quantized_matrix_native.h>
#include <ATen/ops/feature_alpha_dropout_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/feature_alpha_dropout_native.h>
#include <ATen/ops/feature_dropout_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/feature_dropout_native.h>
#include <ATen/ops/fft_fft2_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fft_fft2_native.h>
#include <ATen/ops/fft_fft_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fft_fft_native.h>
#include <ATen/ops/fft_fftn_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fft_fftn_native.h>
#include <ATen/ops/fft_fftshift_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fft_fftshift_native.h>
#include <ATen/ops/fft_hfft2_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fft_hfft2_native.h>
#include <ATen/ops/fft_hfft_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fft_hfft_native.h>
#include <ATen/ops/fft_hfftn_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fft_hfftn_native.h>
#include <ATen/ops/fft_ifft2_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fft_ifft2_native.h>
#include <ATen/ops/fft_ifft_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fft_ifft_native.h>
#include <ATen/ops/fft_ifftn_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fft_ifftn_native.h>
#include <ATen/ops/fft_ifftshift_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fft_ifftshift_native.h>
#include <ATen/ops/fft_ihfft2_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fft_ihfft2_native.h>
#include <ATen/ops/fft_ihfft_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fft_ihfft_native.h>
#include <ATen/ops/fft_ihfftn_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fft_ihfftn_native.h>
#include <ATen/ops/fft_irfft2_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fft_irfft2_native.h>
#include <ATen/ops/fft_irfft_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fft_irfft_native.h>
#include <ATen/ops/fft_irfftn_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fft_irfftn_native.h>
#include <ATen/ops/fft_rfft2_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fft_rfft2_native.h>
#include <ATen/ops/fft_rfft_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fft_rfft_native.h>
#include <ATen/ops/fft_rfftn_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fft_rfftn_native.h>
#include <ATen/ops/fill_diagonal_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fill_diagonal_native.h>
#include <ATen/ops/fix_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fix_native.h>
#include <ATen/ops/flatten_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/flatten_dense_tensors_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/flatten_dense_tensors_native.h>
#include <ATen/ops/flatten_native.h>
#include <ATen/ops/fliplr_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fliplr_native.h>
#include <ATen/ops/flipud_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/flipud_native.h>
#include <ATen/ops/float_power_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/float_power_native.h>
#include <ATen/ops/floor_divide_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/floor_divide_native.h>
#include <ATen/ops/frobenius_norm_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/frobenius_norm_native.h>
#include <ATen/ops/fused_moving_avg_obs_fake_quant_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/fused_moving_avg_obs_fake_quant_native.h>
#include <ATen/ops/gather_backward_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/gather_backward_native.h>
#include <ATen/ops/gather_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/gather_native.h>
#include <ATen/ops/ger_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/ger_native.h>
#include <ATen/ops/gradient_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/gradient_native.h>
#include <ATen/ops/greater_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/greater_equal_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/greater_equal_native.h>
#include <ATen/ops/greater_native.h>
#include <ATen/ops/grid_sampler_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/grid_sampler_native.h>
#include <ATen/ops/group_norm_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/group_norm_native.h>
#include <ATen/ops/gru_cell_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/gru_cell_native.h>
#include <ATen/ops/gru_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/gru_native.h>
#include <ATen/ops/hinge_embedding_loss_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/hinge_embedding_loss_native.h>
#include <ATen/ops/histogramdd_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/histogramdd_native.h>
#include <ATen/ops/hsplit_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/hsplit_native.h>
#include <ATen/ops/hstack_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/hstack_native.h>
#include <ATen/ops/imag_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/imag_native.h>
#include <ATen/ops/index_add_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/index_add_native.h>
#include <ATen/ops/index_copy_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/index_copy_native.h>
#include <ATen/ops/index_fill_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/index_fill_native.h>
#include <ATen/ops/index_select_backward_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/index_select_backward_native.h>
#include <ATen/ops/index_select_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/index_select_native.h>
#include <ATen/ops/infinitely_differentiable_gelu_backward_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/infinitely_differentiable_gelu_backward_native.h>
#include <ATen/ops/inner_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/inner_native.h>
#include <ATen/ops/instance_norm_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/instance_norm_native.h>
#include <ATen/ops/inverse_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/inverse_native.h>
#include <ATen/ops/is_complex_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/is_complex_native.h>
#include <ATen/ops/is_conj_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/is_conj_native.h>
#include <ATen/ops/is_distributed_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/is_distributed_native.h>
#include <ATen/ops/is_floating_point_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/is_floating_point_native.h>
#include <ATen/ops/is_inference_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/is_inference_native.h>
#include <ATen/ops/is_leaf_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/is_leaf_native.h>
#include <ATen/ops/is_neg_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/is_neg_native.h>
#include <ATen/ops/is_nonzero_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/is_nonzero_native.h>
#include <ATen/ops/is_signed_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/is_signed_native.h>
#include <ATen/ops/is_vulkan_available_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/is_vulkan_available_native.h>
#include <ATen/ops/isclose_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/isclose_native.h>
#include <ATen/ops/isfinite_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/isfinite_native.h>
#include <ATen/ops/isreal_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/isreal_native.h>
#include <ATen/ops/istft_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/istft_native.h>
#include <ATen/ops/item_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/item_native.h>
#include <ATen/ops/kl_div_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/kl_div_native.h>
#include <ATen/ops/kron_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/kron_native.h>
#include <ATen/ops/kthvalue_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/kthvalue_native.h>
#include <ATen/ops/l1_loss_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/l1_loss_native.h>
#include <ATen/ops/layer_norm_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/layer_norm_native.h>
#include <ATen/ops/ldexp_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/ldexp_native.h>
#include <ATen/ops/less_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/less_equal_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/less_equal_native.h>
#include <ATen/ops/less_native.h>
#include <ATen/ops/linalg_cholesky_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/linalg_cholesky_native.h>
#include <ATen/ops/linalg_cond_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/linalg_cond_native.h>
#include <ATen/ops/linalg_det_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/linalg_det_native.h>
#include <ATen/ops/linalg_diagonal_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/linalg_diagonal_native.h>
#include <ATen/ops/linalg_eigh_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/linalg_eigh_native.h>
#include <ATen/ops/linalg_eigvals_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/linalg_eigvals_native.h>
#include <ATen/ops/linalg_eigvalsh_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/linalg_eigvalsh_native.h>
#include <ATen/ops/linalg_inv_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/linalg_inv_native.h>
#include <ATen/ops/linalg_ldl_factor_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/linalg_ldl_factor_native.h>
#include <ATen/ops/linalg_lu_factor_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/linalg_lu_factor_native.h>
#include <ATen/ops/linalg_matmul_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/linalg_matmul_native.h>
#include <ATen/ops/linalg_matrix_norm_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/linalg_matrix_norm_native.h>
#include <ATen/ops/linalg_matrix_power_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/linalg_matrix_power_native.h>
#include <ATen/ops/linalg_matrix_rank_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/linalg_matrix_rank_native.h>
#include <ATen/ops/linalg_multi_dot_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/linalg_multi_dot_native.h>
#include <ATen/ops/linalg_norm_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/linalg_norm_native.h>
#include <ATen/ops/linalg_pinv_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/linalg_pinv_native.h>
#include <ATen/ops/linalg_slogdet_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/linalg_slogdet_native.h>
#include <ATen/ops/linalg_solve_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/linalg_solve_ex_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/linalg_solve_ex_native.h>
#include <ATen/ops/linalg_solve_native.h>
#include <ATen/ops/linalg_svd_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/linalg_svd_native.h>
#include <ATen/ops/linalg_svdvals_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/linalg_svdvals_native.h>
#include <ATen/ops/linalg_tensorinv_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/linalg_tensorinv_native.h>
#include <ATen/ops/linalg_tensorsolve_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/linalg_tensorsolve_native.h>
#include <ATen/ops/linalg_vander_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/linalg_vander_native.h>
#include <ATen/ops/linalg_vecdot_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/linalg_vecdot_native.h>
#include <ATen/ops/linear_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/linear_native.h>
#include <ATen/ops/log_sigmoid_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/log_sigmoid_native.h>
#include <ATen/ops/log_softmax_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/log_softmax_native.h>
#include <ATen/ops/logcumsumexp_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/logcumsumexp_native.h>
#include <ATen/ops/logdet_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/logdet_native.h>
#include <ATen/ops/logsumexp_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/logsumexp_native.h>
#include <ATen/ops/lstm_cell_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/lstm_cell_native.h>
#include <ATen/ops/lstm_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/lstm_native.h>
#include <ATen/ops/lu_solve_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/lu_solve_native.h>
#include <ATen/ops/mH_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/mH_native.h>
#include <ATen/ops/mT_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/mT_native.h>
#include <ATen/ops/margin_ranking_loss_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/margin_ranking_loss_native.h>
#include <ATen/ops/masked_select_backward_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/masked_select_backward_native.h>
#include <ATen/ops/matmul_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/matmul_native.h>
#include <ATen/ops/matrix_H_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/matrix_H_native.h>
#include <ATen/ops/matrix_exp_backward_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/matrix_exp_backward_native.h>
#include <ATen/ops/matrix_exp_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/matrix_exp_native.h>
#include <ATen/ops/matrix_power_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/matrix_power_native.h>
#include <ATen/ops/max_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/max_native.h>
#include <ATen/ops/max_pool1d_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/max_pool1d_native.h>
#include <ATen/ops/max_pool1d_with_indices_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/max_pool1d_with_indices_native.h>
#include <ATen/ops/max_pool2d_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/max_pool2d_native.h>
#include <ATen/ops/max_pool3d_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/max_pool3d_native.h>
#include <ATen/ops/mean_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/mean_native.h>
#include <ATen/ops/median_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/median_native.h>
#include <ATen/ops/meshgrid_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/meshgrid_native.h>
#include <ATen/ops/min_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/min_native.h>
#include <ATen/ops/mish_backward_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/mish_backward_native.h>
#include <ATen/ops/mode_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/mode_native.h>
#include <ATen/ops/moveaxis_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/moveaxis_native.h>
#include <ATen/ops/movedim_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/movedim_native.h>
#include <ATen/ops/msort_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/msort_native.h>
#include <ATen/ops/multilabel_margin_loss_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/multilabel_margin_loss_native.h>
#include <ATen/ops/multiply_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/multiply_native.h>
#include <ATen/ops/nanmean_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/nanmean_native.h>
#include <ATen/ops/nanmedian_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/nanmedian_native.h>
#include <ATen/ops/nanquantile_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/nanquantile_native.h>
#include <ATen/ops/narrow_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/narrow_native.h>
#include <ATen/ops/native_channel_shuffle_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/native_channel_shuffle_native.h>
#include <ATen/ops/negative_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/negative_native.h>
#include <ATen/ops/nested_to_padded_tensor_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/nested_to_padded_tensor_native.h>
#include <ATen/ops/nll_loss2d_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/nll_loss2d_native.h>
#include <ATen/ops/nll_loss_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/nll_loss_native.h>
#include <ATen/ops/nll_loss_nd_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/nll_loss_nd_native.h>
#include <ATen/ops/nonzero_numpy_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/nonzero_numpy_native.h>
#include <ATen/ops/norm_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/norm_except_dim_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/norm_except_dim_native.h>
#include <ATen/ops/norm_native.h>
#include <ATen/ops/not_equal_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/not_equal_native.h>
#include <ATen/ops/nuclear_norm_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/nuclear_norm_native.h>
#include <ATen/ops/numpy_T_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/numpy_T_native.h>
#include <ATen/ops/one_hot_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/one_hot_native.h>
#include <ATen/ops/or_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/or_native.h>
#include <ATen/ops/orgqr_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/orgqr_native.h>
#include <ATen/ops/outer_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/outer_native.h>
#include <ATen/ops/output_nr_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/output_nr_native.h>
#include <ATen/ops/pad_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/pad_native.h>
#include <ATen/ops/pad_sequence_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/pad_sequence_native.h>
#include <ATen/ops/pairwise_distance_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/pairwise_distance_native.h>
#include <ATen/ops/pdist_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/pdist_native.h>
#include <ATen/ops/pin_memory_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/pin_memory_native.h>
#include <ATen/ops/pinverse_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/pinverse_native.h>
#include <ATen/ops/poisson_nll_loss_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/poisson_nll_loss_native.h>
#include <ATen/ops/positive_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/positive_native.h>
#include <ATen/ops/prelu_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/prelu_native.h>
#include <ATen/ops/prod_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/prod_native.h>
#include <ATen/ops/promote_types_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/promote_types_native.h>
#include <ATen/ops/qr_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/qr_native.h>
#include <ATen/ops/quantile_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/quantile_native.h>
#include <ATen/ops/quantized_gru_cell_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/quantized_gru_cell_native.h>
#include <ATen/ops/quantized_lstm_cell_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/quantized_lstm_cell_native.h>
#include <ATen/ops/quantized_rnn_relu_cell_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/quantized_rnn_relu_cell_native.h>
#include <ATen/ops/quantized_rnn_tanh_cell_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/quantized_rnn_tanh_cell_native.h>
#include <ATen/ops/rand_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/rand_native.h>
#include <ATen/ops/randn_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/randn_native.h>
#include <ATen/ops/ravel_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/ravel_native.h>
#include <ATen/ops/real_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/real_native.h>
#include <ATen/ops/refine_names_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/refine_names_native.h>
#include <ATen/ops/relu6_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/relu6_native.h>
#include <ATen/ops/rename_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/rename_native.h>
#include <ATen/ops/repeat_interleave_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/repeat_interleave_native.h>
#include <ATen/ops/requires_grad_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/requires_grad_native.h>
#include <ATen/ops/reshape_as_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/reshape_as_native.h>
#include <ATen/ops/reshape_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/reshape_native.h>
#include <ATen/ops/resolve_conj_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/resolve_conj_native.h>
#include <ATen/ops/resolve_neg_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/resolve_neg_native.h>
#include <ATen/ops/result_type_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/result_type_native.h>
#include <ATen/ops/retain_grad_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/retain_grad_native.h>
#include <ATen/ops/retains_grad_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/retains_grad_native.h>
#include <ATen/ops/rnn_relu_cell_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/rnn_relu_cell_native.h>
#include <ATen/ops/rnn_relu_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/rnn_relu_native.h>
#include <ATen/ops/rnn_tanh_cell_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/rnn_tanh_cell_native.h>
#include <ATen/ops/rnn_tanh_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/rnn_tanh_native.h>
#include <ATen/ops/row_stack_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/row_stack_native.h>
#include <ATen/ops/rrelu_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/rrelu_native.h>
#include <ATen/ops/scaled_dot_product_attention_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/scaled_dot_product_attention_native.h>
#include <ATen/ops/scatter_add_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/scatter_add_native.h>
#include <ATen/ops/scatter_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/scatter_native.h>
#include <ATen/ops/select_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/select_native.h>
#include <ATen/ops/selu_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/selu_native.h>
#include <ATen/ops/set_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/set_data_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/set_data_native.h>
#include <ATen/ops/set_native.h>
#include <ATen/ops/silu_backward_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/silu_backward_native.h>
#include <ATen/ops/size_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/size_native.h>
#include <ATen/ops/slogdet_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/slogdet_native.h>
#include <ATen/ops/slow_conv3d_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/slow_conv3d_native.h>
#include <ATen/ops/smm_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/smm_native.h>
#include <ATen/ops/softmax_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/softmax_native.h>
#include <ATen/ops/sort_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/sort_native.h>
#include <ATen/ops/sparse_bsc_tensor_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/sparse_bsc_tensor_native.h>
#include <ATen/ops/sparse_bsr_tensor_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/sparse_bsr_tensor_native.h>
#include <ATen/ops/sparse_compressed_tensor_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/sparse_compressed_tensor_native.h>
#include <ATen/ops/sparse_coo_tensor_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/sparse_coo_tensor_native.h>
#include <ATen/ops/sparse_csc_tensor_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/sparse_csc_tensor_native.h>
#include <ATen/ops/sparse_csr_tensor_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/sparse_csr_tensor_native.h>
#include <ATen/ops/special_chebyshev_polynomial_t_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_chebyshev_polynomial_t_native.h>
#include <ATen/ops/special_chebyshev_polynomial_u_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_chebyshev_polynomial_u_native.h>
#include <ATen/ops/special_chebyshev_polynomial_v_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_chebyshev_polynomial_v_native.h>
#include <ATen/ops/special_chebyshev_polynomial_w_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_chebyshev_polynomial_w_native.h>
#include <ATen/ops/special_digamma_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_digamma_native.h>
#include <ATen/ops/special_erf_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_erf_native.h>
#include <ATen/ops/special_erfc_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_erfc_native.h>
#include <ATen/ops/special_erfinv_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_erfinv_native.h>
#include <ATen/ops/special_exp2_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_exp2_native.h>
#include <ATen/ops/special_expit_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_expit_native.h>
#include <ATen/ops/special_expm1_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_expm1_native.h>
#include <ATen/ops/special_gammainc_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_gammainc_native.h>
#include <ATen/ops/special_gammaincc_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_gammaincc_native.h>
#include <ATen/ops/special_gammaln_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_gammaln_native.h>
#include <ATen/ops/special_hermite_polynomial_h_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_hermite_polynomial_h_native.h>
#include <ATen/ops/special_hermite_polynomial_he_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_hermite_polynomial_he_native.h>
#include <ATen/ops/special_i0_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_i0_native.h>
#include <ATen/ops/special_laguerre_polynomial_l_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_laguerre_polynomial_l_native.h>
#include <ATen/ops/special_legendre_polynomial_p_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_legendre_polynomial_p_native.h>
#include <ATen/ops/special_log1p_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_log1p_native.h>
#include <ATen/ops/special_log_softmax_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_log_softmax_native.h>
#include <ATen/ops/special_logit_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_logit_native.h>
#include <ATen/ops/special_logsumexp_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_logsumexp_native.h>
#include <ATen/ops/special_multigammaln_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_multigammaln_native.h>
#include <ATen/ops/special_ndtr_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_ndtr_native.h>
#include <ATen/ops/special_polygamma_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_polygamma_native.h>
#include <ATen/ops/special_psi_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_psi_native.h>
#include <ATen/ops/special_round_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_round_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_native.h>
#include <ATen/ops/special_sinc_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_sinc_native.h>
#include <ATen/ops/special_softmax_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_softmax_native.h>
#include <ATen/ops/special_xlogy_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/special_xlogy_native.h>
#include <ATen/ops/split_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/split_native.h>
#include <ATen/ops/square_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/square_native.h>
#include <ATen/ops/squeeze_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/squeeze_native.h>
#include <ATen/ops/sspaddmm_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/sspaddmm_native.h>
#include <ATen/ops/std_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/std_mean_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/std_mean_native.h>
#include <ATen/ops/std_native.h>
#include <ATen/ops/stft_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/stft_native.h>
#include <ATen/ops/stride_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/stride_native.h>
#include <ATen/ops/subtract_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/subtract_native.h>
#include <ATen/ops/sum_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/sum_native.h>
#include <ATen/ops/sum_to_size_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/sum_to_size_native.h>
#include <ATen/ops/svd_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/svd_native.h>
#include <ATen/ops/swapaxes_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/swapaxes_native.h>
#include <ATen/ops/swapdims_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/swapdims_native.h>
#include <ATen/ops/take_along_dim_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/take_along_dim_native.h>
#include <ATen/ops/tensor_split_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/tensor_split_native.h>
#include <ATen/ops/tensordot_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/tensordot_native.h>
#include <ATen/ops/thnn_conv2d_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/thnn_conv2d_native.h>
#include <ATen/ops/tile_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/tile_native.h>
#include <ATen/ops/to_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/to_dense_backward_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/to_dense_backward_native.h>
#include <ATen/ops/to_dense_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/to_dense_native.h>
#include <ATen/ops/to_mkldnn_backward_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/to_mkldnn_backward_native.h>
#include <ATen/ops/to_native.h>
#include <ATen/ops/trace_backward_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/trace_backward_native.h>
#include <ATen/ops/transpose_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/transpose_native.h>
#include <ATen/ops/trapezoid_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/trapezoid_native.h>
#include <ATen/ops/trapz_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/trapz_native.h>
#include <ATen/ops/triplet_margin_loss_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/triplet_margin_loss_native.h>
#include <ATen/ops/true_divide_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/true_divide_native.h>
#include <ATen/ops/type_as_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/type_as_native.h>
#include <ATen/ops/unbind_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/unbind_native.h>
#include <ATen/ops/unflatten_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/unflatten_dense_tensors_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/unflatten_dense_tensors_native.h>
#include <ATen/ops/unflatten_native.h>
#include <ATen/ops/unsafe_chunk_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/unsafe_chunk_native.h>
#include <ATen/ops/upsample_bicubic2d_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/upsample_bicubic2d_native.h>
#include <ATen/ops/upsample_bilinear2d_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/upsample_bilinear2d_native.h>
#include <ATen/ops/upsample_linear1d_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/upsample_linear1d_native.h>
#include <ATen/ops/upsample_nearest1d_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/upsample_nearest1d_native.h>
#include <ATen/ops/upsample_nearest2d_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/upsample_nearest2d_native.h>
#include <ATen/ops/upsample_nearest3d_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/upsample_nearest3d_native.h>
#include <ATen/ops/upsample_trilinear3d_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/upsample_trilinear3d_native.h>
#include <ATen/ops/value_selecting_reduction_backward_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/value_selecting_reduction_backward_native.h>
#include <ATen/ops/vander_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/vander_native.h>
#include <ATen/ops/var_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/var_mean_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/var_mean_native.h>
#include <ATen/ops/var_native.h>
#include <ATen/ops/view_as_compositeimplicitautograd_dispatch.h>
#include <ATen/ops/view_as_native.h>
#include <ATen/ops/vsplit_compositeimplicitautograd_dispatch.h>
1022#include <ATen/ops/vsplit_native.h>
1023#include <ATen/ops/vstack_compositeimplicitautograd_dispatch.h>
1024#include <ATen/ops/vstack_native.h>
1025#include <ATen/ops/where_compositeimplicitautograd_dispatch.h>
1026#include <ATen/ops/where_native.h>
1027#include <ATen/ops/xor_compositeimplicitautograd_dispatch.h>
1028#include <ATen/ops/xor_native.h>
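// NB: the per-operator headers above come in pairs: <ATen/ops/X_native.h>
// declares the at::native:: kernel(s) for operator X, while
// <ATen/ops/X_compositeimplicitautograd_dispatch.h> declares the
// corresponding at::compositeimplicitautograd::X entry points that this
// translation unit is responsible for.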

// See template file RegisterDispatchDefinitions.ini
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may already have been
// defined in the at namespace.
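// A minimal sketch of such a registration block, using the _cast_Byte
// wrapper defined below (the actual generated block appears later in this
// file):
//
//   TORCH_LIBRARY_IMPL(aten, CompositeImplicitAutograd, m) {
//     m.impl("_cast_Byte", TORCH_FN(wrapper_CompositeImplicitAutograd___cast_Byte));
//   }
//
// m.impl binds the operator's schema name to the anonymous-namespace
// wrapper, so the wrapper identifiers never leak out of this TU.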
namespace {
void resize_out(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {
  TORCH_CHECK(options.dtype() == out.dtype(),
      "Expected out tensor to have dtype ", options.dtype(), ", but got ", out.dtype(), " instead");
  TORCH_CHECK(options.device() == out.device(),
      "Expected out tensor to have device ", options.device(), ", but got ", out.device(), " instead");
  const bool resized = at::native::resize_output(out, sizes);
  // Only restride if a resize occurred; otherwise we ignore the (advisory)
  // strides from the meta function and directly use the output tensor's
  // preexisting strides
  if (resized) {
    if (!strides.empty()) {
      TORCH_INTERNAL_ASSERT(!options.memory_format_opt().has_value());
      // TODO: avoid the redispatch here
      out.as_strided_(sizes, strides);
    } else if (options.memory_format_opt().has_value()) {
      out.unsafeGetTensorImpl()->empty_tensor_restride(*options.memory_format_opt());
    }
  }
}
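// Illustrative sketch only (hypothetical out= wrapper, not generated code):
// a structured out= kernel calls resize_out with the sizes/strides computed
// by its meta function before the kernel writes into `out`:
//
//   at::Tensor & my_op_out(const at::Tensor & self, at::Tensor & out) {
//     // Empty strides: accept whatever layout resize_output produces.
//     resize_out(out, self.sizes(), /*strides=*/{}, self.options());
//     out.copy_(self);  // stand-in for the real kernel body
//     return out;
//   }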
void check_inplace(const Tensor &self, IntArrayRef sizes, const TensorOptions &options) {
  // These checks are needed on those operators that:
  //   1) don't use 'TensorIterator' (e.g. 'addmm' and 'baddbmm')
  //   2) have particular typing rules (e.g. 'cumsum' and 'cumprod')
  // For other operators (e.g. 'add'), 'TensorIterator' already checks
  // these things separately.
  TORCH_CHECK(options.dtype() == self.dtype(),
      "Bad in-place call: ",
      "input tensor dtype ", self.dtype(), " and output tensor dtype ", options.dtype(), " should match");
  TORCH_CHECK(options.device() == self.device(),
      "Bad in-place call: ",
      "input tensor device ", self.device(), " and output tensor device ", options.device(), " should match");
  TORCH_CHECK(sizes == self.sizes(),
      "Bad in-place call: ",
      "input tensor size ", self.sizes(), " and output tensor size ", sizes, " should match");
}
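// Illustrative sketch only (hypothetical values): what these checks reject.
// If an op's typing rules compute a result dtype wider than `self`, the
// in-place variant must fail rather than silently truncate:
//
//   at::Tensor self = at::empty({2, 3}, at::kInt);
//   // Throws "Bad in-place call: input tensor dtype Int and output
//   // tensor dtype Long should match":
//   check_inplace(self, self.sizes(), self.options().dtype(at::kLong));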
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___cast_Byte(const at::Tensor & self, bool non_blocking) {
  // No device check
  // DeviceGuard omitted
  return at::native::_cast_Byte(self, non_blocking);
}
} // anonymous namespace
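// NB: every wrapper in this file follows the pattern above: a free function
// named wrapper_<DispatchKey>_<overload>_<opname> in its own anonymous
// namespace that forwards straight to the at::native:: implementation.
// CompositeImplicitAutograd kernels are implemented entirely in terms of
// other operators, so the per-op device check and DeviceGuard are omitted
// here; the constituent ops perform their own.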
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___cast_Char(const at::Tensor & self, bool non_blocking) {
  // No device check
  // DeviceGuard omitted
  return at::native::_cast_Char(self, non_blocking);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___cast_Double(const at::Tensor & self, bool non_blocking) {
  // No device check
  // DeviceGuard omitted
  return at::native::_cast_Double(self, non_blocking);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___cast_Float(const at::Tensor & self, bool non_blocking) {
  // No device check
  // DeviceGuard omitted
  return at::native::_cast_Float(self, non_blocking);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___cast_Int(const at::Tensor & self, bool non_blocking) {
  // No device check
  // DeviceGuard omitted
  return at::native::_cast_Int(self, non_blocking);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___cast_Long(const at::Tensor & self, bool non_blocking) {
  // No device check
  // DeviceGuard omitted
  return at::native::_cast_Long(self, non_blocking);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___cast_Short(const at::Tensor & self, bool non_blocking) {
  // No device check
  // DeviceGuard omitted
  return at::native::_cast_Short(self, non_blocking);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___cast_Half(const at::Tensor & self, bool non_blocking) {
  // No device check
  // DeviceGuard omitted
  return at::native::_cast_Half(self, non_blocking);
}
} // anonymous namespace
namespace {
void wrapper_CompositeImplicitAutograd___backward(const at::Tensor & self, at::TensorList inputs, const c10::optional<at::Tensor> & gradient, c10::optional<bool> retain_graph, bool create_graph) {
  // No device check
  // DeviceGuard omitted
  return at::native::_backward(self, inputs, gradient, retain_graph, create_graph);
}
} // anonymous namespace
namespace {
void wrapper_CompositeImplicitAutograd__set_data(at::Tensor & self, const at::Tensor & new_data) {
  // No device check
  // DeviceGuard omitted
  return at::native::set_data(self, new_data);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__data(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::data(self);
}
} // anonymous namespace
namespace {
bool wrapper_CompositeImplicitAutograd__is_leaf(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::is_leaf(self);
}
} // anonymous namespace
namespace {
int64_t wrapper_CompositeImplicitAutograd__output_nr(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::output_nr(self);
}
} // anonymous namespace
namespace {
int64_t wrapper_CompositeImplicitAutograd___version(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::_version(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__requires_grad_(at::Tensor & self, bool requires_grad) {
  // No device check
  // DeviceGuard omitted
  return at::native::requires_grad_(self, requires_grad);
}
} // anonymous namespace
namespace {
void wrapper_CompositeImplicitAutograd__retain_grad(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::retain_grad(self);
}
} // anonymous namespace
namespace {
bool wrapper_CompositeImplicitAutograd__retains_grad(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::retains_grad(self);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd___unpack_dual(const at::Tensor & dual, int64_t level) {
  // No device check
  // DeviceGuard omitted
  return at::native::_unpack_dual(dual, level);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__rename_(at::Tensor & self, c10::optional<at::DimnameList> names) {
  // No device check
  // DeviceGuard omitted
  return at::native::rename_(self, names);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__rename(const at::Tensor & self, c10::optional<at::DimnameList> names) {
  // No device check
  // DeviceGuard omitted
  return at::native::rename(self, names);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__align_to(const at::Tensor & self, at::DimnameList names) {
  // No device check
  // DeviceGuard omitted
  return at::native::align_to(self, names);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_ellipsis_idx_align_to(const at::Tensor & self, at::DimnameList order, int64_t ellipsis_idx) {
  // No device check
  // DeviceGuard omitted
  return at::native::align_to(self, order, ellipsis_idx);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__align_as(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::align_as(self, other);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd__align_tensors(at::TensorList tensors) {
  // No device check
  // DeviceGuard omitted
  return at::native::align_tensors(tensors);
}
} // anonymous namespace
namespace {
void wrapper_CompositeImplicitAutograd___assert_tensor_metadata(const at::Tensor & a, at::OptionalIntArrayRef size, at::OptionalIntArrayRef stride, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::_assert_tensor_metadata(a, size, stride, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__refine_names(const at::Tensor & self, at::DimnameList names) {
  // No device check
  // DeviceGuard omitted
  return at::native::refine_names(self, names);
}
} // anonymous namespace
namespace {
bool wrapper_CompositeImplicitAutograd___use_cudnn_rnn_flatten_weight() {
  // No device check
  // DeviceGuard omitted
  return at::native::_use_cudnn_rnn_flatten_weight();
}
} // anonymous namespace
namespace {
int64_t wrapper_CompositeImplicitAutograd___debug_has_internal_overlap(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::_debug_has_internal_overlap(self);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd___sobol_engine_draw(const at::Tensor & quasi, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sobol_engine_draw(quasi, n, sobolstate, dimension, num_generated, dtype);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd___sobol_engine_ff_(at::Tensor & self, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sobol_engine_ff_(self, n, sobolstate, dimension, num_generated);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd___sobol_engine_scramble_(at::Tensor & self, const at::Tensor & ltm, int64_t dimension) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sobol_engine_scramble_(self, ltm, dimension);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd___sobol_engine_initialize_state_(at::Tensor & self, int64_t dimension) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sobol_engine_initialize_state_(self, dimension);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___reshape_from_tensor(const at::Tensor & self, const at::Tensor & shape) {
  // No device check
  // DeviceGuard omitted
  return at::native::_reshape_from_tensor(self, shape);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___shape_as_tensor(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::_shape_as_tensor(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__dropout(const at::Tensor & input, double p, bool train) {
  // No device check
  // DeviceGuard omitted
  return at::native::dropout(input, p, train);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__dropout_(at::Tensor & self, double p, bool train) {
  // No device check
  // DeviceGuard omitted
  return at::native::dropout_(self, p, train);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__feature_dropout(const at::Tensor & input, double p, bool train) {
  // No device check
  // DeviceGuard omitted
  return at::native::feature_dropout(input, p, train);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__feature_dropout_(at::Tensor & self, double p, bool train) {
  // No device check
  // DeviceGuard omitted
  return at::native::feature_dropout_(self, p, train);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__alpha_dropout(const at::Tensor & input, double p, bool train) {
  // No device check
  // DeviceGuard omitted
  return at::native::alpha_dropout(input, p, train);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__alpha_dropout_(at::Tensor & self, double p, bool train) {
  // No device check
  // DeviceGuard omitted
  return at::native::alpha_dropout_(self, p, train);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__feature_alpha_dropout(const at::Tensor & input, double p, bool train) {
  // No device check
  // DeviceGuard omitted
  return at::native::feature_alpha_dropout(input, p, train);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__feature_alpha_dropout_(at::Tensor & self, double p, bool train) {
  // No device check
  // DeviceGuard omitted
  return at::native::feature_alpha_dropout_(self, p, train);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__absolute(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::absolute(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_absolute_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::absolute_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__absolute_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::absolute_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__chalf(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
  // No device check
  // DeviceGuard omitted
  return at::native::chalf(self, memory_format);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__real(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::real(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__imag(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::imag(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__conj(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::conj(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__conj_physical(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::conj_physical(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__resolve_conj(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::resolve_conj(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__resolve_neg(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::resolve_neg(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__arccos(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::arccos(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_arccos_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::arccos_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__arccos_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::arccos_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__avg_pool1d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad) {
  // No device check
  // DeviceGuard omitted
  return at::native::avg_pool1d(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__adaptive_avg_pool1d(const at::Tensor & self, at::IntArrayRef output_size) {
  // No device check
  // DeviceGuard omitted
  return at::native::adaptive_avg_pool1d(self, output_size);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__adaptive_max_pool1d(const at::Tensor & self, at::IntArrayRef output_size) {
  // No device check
  // DeviceGuard omitted
  return at::native::adaptive_max_pool1d(self, output_size);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__affine_grid_generator_backward(const at::Tensor & grad, at::IntArrayRef size, bool align_corners) {
  // No device check
  // DeviceGuard omitted
  return at::native::affine_grid_generator_backward(grad, size, align_corners);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___test_check_tensor(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::_test_check_tensor(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dimname_all(const at::Tensor & self, at::Dimname dim, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::all(self, dim, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_dimname_out_all_out(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::all_out(self, dim, keepdim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dimname_any(const at::Tensor & self, at::Dimname dim, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::any(self, dim, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_dimname_out_any_out(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::any_out(self, dim, keepdim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___dim_arange(const at::Tensor & like, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::_dim_arange(like, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__arccosh(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::arccosh(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_arccosh_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::arccosh_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__arccosh_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::arccosh_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__arcsinh(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::arcsinh(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_arcsinh_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::arcsinh_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__arcsinh_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::arcsinh_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__arctanh(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::arctanh(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_arctanh_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::arctanh_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__arctanh_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::arctanh_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__arcsin(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::arcsin(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_arcsin_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::arcsin_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__arcsin_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::arcsin_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__arctan(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::arctan(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_arctan_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::arctan_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__arctan_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::arctan_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__atleast_1d(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::atleast_1d(self);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_Sequence_atleast_1d(at::TensorList tensors) {
  // No device check
  // DeviceGuard omitted
  return at::native::atleast_1d(tensors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__atleast_2d(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::atleast_2d(self);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_Sequence_atleast_2d(at::TensorList tensors) {
  // No device check
  // DeviceGuard omitted
  return at::native::atleast_2d(tensors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__atleast_3d(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::atleast_3d(self);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_Sequence_atleast_3d(at::TensorList tensors) {
  // No device check
  // DeviceGuard omitted
  return at::native::atleast_3d(tensors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__batch_norm(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled) {
  // No device check
  // DeviceGuard omitted
  return at::native::batch_norm(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,int64_t> wrapper_CompositeImplicitAutograd___batch_norm_impl_index(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled) {
  // No device check
  // DeviceGuard omitted
  return at::native::_batch_norm_impl_index(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd___batch_norm_impl_index_backward(int64_t impl_index, const at::Tensor & input, const at::Tensor & grad_output, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var_transform, bool train, double eps, ::std::array<bool,3> output_mask, const at::Tensor & reservedSpace) {
  // No device check
  // DeviceGuard omitted
  return at::native::_batch_norm_impl_index_backward(impl_index, input, grad_output, weight, running_mean, running_var, save_mean, save_var_transform, train, eps, output_mask, reservedSpace);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__bilinear(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const c10::optional<at::Tensor> & bias) {
  // No device check
  // DeviceGuard omitted
  return at::native::bilinear(input1, input2, weight, bias);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd__broadcast_tensors(at::TensorList tensors) {
  // No device check
  // DeviceGuard omitted
  return at::native::broadcast_tensors(tensors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__broadcast_to(const at::Tensor & self, c10::SymIntArrayRef size) {
  // No device check
  // DeviceGuard omitted
  return at::native::broadcast_to_symint(self, size);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_names_cat(at::TensorList tensors, at::Dimname dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::cat(tensors, dim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_names_out_cat_out(at::TensorList tensors, at::Dimname dim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::cat_out(tensors, dim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__concat(at::TensorList tensors, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::concat(tensors, dim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_concat_out(at::TensorList tensors, int64_t dim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::concat_out(tensors, dim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_names_concat(at::TensorList tensors, at::Dimname dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::concat(tensors, dim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_names_out_concat_out(at::TensorList tensors, at::Dimname dim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::concat_out(tensors, dim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__concatenate(at::TensorList tensors, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::concatenate(tensors, dim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_concatenate_out(at::TensorList tensors, int64_t dim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::concatenate_out(tensors, dim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_names_concatenate(at::TensorList tensors, at::Dimname dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::concatenate(tensors, dim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_names_out_concatenate_out(at::TensorList tensors, at::Dimname dim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::concatenate_out(tensors, dim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__chain_matmul(at::TensorList matrices) {
  // No device check
  // DeviceGuard omitted
  return at::native::chain_matmul(matrices);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_chain_matmul_out(at::TensorList matrices, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::chain_matmul_out(matrices, out);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd__unsafe_chunk(const at::Tensor & self, int64_t chunks, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::unsafe_chunk(self, chunks, dim);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd__chunk(const at::Tensor & self, int64_t chunks, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::chunk(self, chunks, dim);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_sections_tensor_split(const at::Tensor & self, c10::SymInt sections, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::tensor_split_sections_symint(self, sections, dim);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_indices_tensor_split(const at::Tensor & self, c10::SymIntArrayRef indices, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::tensor_split_indices_symint(self, indices, dim);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_tensor_indices_or_sections_tensor_split(const at::Tensor & self, const at::Tensor & tensor_indices_or_sections, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::tensor_split(self, tensor_indices_or_sections, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__clip(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
  // No device check
  // DeviceGuard omitted
  return at::native::clip(self, min, max);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_clip_out(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::clip_out(self, min, max, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__clip_(at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
  // No device check
  // DeviceGuard omitted
  return at::native::clip_(self, min, max);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_clip(const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
  // No device check
  // DeviceGuard omitted
  return at::native::clip(self, min, max);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_out_clip_out(const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::clip_out(self, min, max, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_clip_(at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
  // No device check
  // DeviceGuard omitted
  return at::native::clip_(self, min, max);
}
} // anonymous namespace
namespace {
bool wrapper_CompositeImplicitAutograd__cudnn_is_acceptable(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::cudnn_is_acceptable(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__contiguous(const at::Tensor & self, at::MemoryFormat memory_format) {
  // No device check
  // DeviceGuard omitted
  return at::native::contiguous(self, memory_format);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_deprecated__convolution(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled) {
  // No device check
  // DeviceGuard omitted
  return at::native::_convolution(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___convolution_mode(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
  // No device check
  // DeviceGuard omitted
  return at::native::_convolution_mode(input, weight, bias, stride, padding, dilation, groups);
}
} // anonymous namespace
namespace {
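// NB: the double-backward wrapper below takes SymInt arrays in its schema
// but calls a native kernel that expects plain IntArrayRef;
// C10_AS_INTARRAYREF_SLOW materializes the SymInts as concrete integers
// (failing loudly if any are still symbolic) before the call.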
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd___convolution_double_backward(const c10::optional<at::Tensor> & ggI, const c10::optional<at::Tensor> & ggW, const c10::optional<at::Tensor> & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
  // No device check
  // DeviceGuard omitted
  return at::native::_convolution_double_backward(ggI, ggW, ggb, gO, weight, self, stride, C10_AS_INTARRAYREF_SLOW(padding), dilation, transposed, C10_AS_INTARRAYREF_SLOW(output_padding), groups, output_mask);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__conv1d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
  // No device check
  // DeviceGuard omitted
  return at::native::conv1d(input, weight, bias, stride, padding, dilation, groups);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__conv2d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
  // No device check
  // DeviceGuard omitted
  return at::native::conv2d(input, weight, bias, stride, padding, dilation, groups);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__conv3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
  // No device check
  // DeviceGuard omitted
  return at::native::conv3d(input, weight, bias, stride, padding, dilation, groups);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_padding_conv1d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
  // No device check
  // DeviceGuard omitted
  return at::native::conv1d(input, weight, bias, stride, padding, dilation, groups);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_padding_conv2d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
  // No device check
  // DeviceGuard omitted
  return at::native::conv2d(input, weight, bias, stride, padding, dilation, groups);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_padding_conv3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
  // No device check
  // DeviceGuard omitted
  return at::native::conv3d(input, weight, bias, stride, padding, dilation, groups);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__conv_tbc_backward(const at::Tensor & self, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) {
  // No device check
  // DeviceGuard omitted
  return at::native::conv_tbc_backward(self, input, weight, bias, pad);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__conv_transpose1d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation) {
  // No device check
  // DeviceGuard omitted
  return at::native::conv_transpose1d(input, weight, bias, stride, padding, output_padding, groups, dilation);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_input_conv_transpose2d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation) {
  // No device check
  // DeviceGuard omitted
  return at::native::conv_transpose2d(input, weight, bias, stride, padding, output_padding, groups, dilation);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_input_conv_transpose3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation) {
  // No device check
  // DeviceGuard omitted
  return at::native::conv_transpose3d(input, weight, bias, stride, padding, output_padding, groups, dilation);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__cosine_embedding_loss(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) {
  // No device check
  // DeviceGuard omitted
  return at::native::cosine_embedding_loss(input1, input2, target, margin, reduction);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__cov(const at::Tensor & self, int64_t correction, const c10::optional<at::Tensor> & fweights, const c10::optional<at::Tensor> & aweights) {
  // No device check
  // DeviceGuard omitted
  return at::native::cov(self, correction, fweights, aweights);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__corrcoef(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::corrcoef(self);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_dimname_cummax(const at::Tensor & self, at::Dimname dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::cummax(self, dim);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_dimname_out_cummax_out(const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices) {
  // No device check
  // DeviceGuard omitted
  return at::native::cummax_out(self, dim, values, indices);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_dimname_cummin(const at::Tensor & self, at::Dimname dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::cummin(self, dim);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_dimname_out_cummin_out(const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices) {
  // No device check
  // DeviceGuard omitted
  return at::native::cummin_out(self, dim, values, indices);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__cummaxmin_backward(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & indices, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::cummaxmin_backward(grad, input, indices, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dimname_cumprod(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::cumprod(self, dim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_dimname_out_cumprod_out(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::cumprod_out(self, dim, dtype, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_dimname_cumprod_(at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::cumprod_(self, dim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__cumprod_backward(const at::Tensor & grad, const at::Tensor & input, int64_t dim, const at::Tensor & output) {
  // No device check
  // DeviceGuard omitted
  return at::native::cumprod_backward(grad, input, dim, output);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dimname_cumsum(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::cumsum(self, dim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_dimname_out_cumsum_out(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::cumsum_out(self, dim, dtype, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_dimname_cumsum_(at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::cumsum_(self, dim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_x_cumulative_trapezoid(const at::Tensor & y, const at::Tensor & x, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::cumulative_trapezoid(y, x, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dx_cumulative_trapezoid(const at::Tensor & y, const at::Scalar & dx, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::cumulative_trapezoid(y, dx, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_IntList_ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) {
  // No device check
  // DeviceGuard omitted
  return at::native::ctc_loss(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) {
  // No device check
  // DeviceGuard omitted
  return at::native::ctc_loss(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__diagflat(const at::Tensor & self, int64_t offset) {
  // No device check
  // DeviceGuard omitted
  return at::native::diagflat(self, offset);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_diagonal(const at::Tensor & A, int64_t offset, int64_t dim1, int64_t dim2) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_diagonal(A, offset, dim1, dim2);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Dimname_diagonal(const at::Tensor & self, at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset) {
  // No device check
  // DeviceGuard omitted
  return at::native::diagonal(self, outdim, dim1, dim2, offset);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__fill_diagonal_(at::Tensor & self, const at::Scalar & fill_value, bool wrap) {
  // No device check
  // DeviceGuard omitted
  return at::native::fill_diagonal_(self, fill_value, wrap);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__diff(const at::Tensor & self, int64_t n, int64_t dim, const c10::optional<at::Tensor> & prepend, const c10::optional<at::Tensor> & append) {
  // No device check
  // DeviceGuard omitted
  return at::native::diff(self, n, dim, prepend, append);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_diff_out(const at::Tensor & self, int64_t n, int64_t dim, const c10::optional<at::Tensor> & prepend, const c10::optional<at::Tensor> & append, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::diff_out(self, n, dim, prepend, append, out);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_scalarint_gradient(const at::Tensor & self, const c10::optional<at::Scalar> & spacing, c10::optional<int64_t> dim, int64_t edge_order) {
  // No device check
  // DeviceGuard omitted
  return at::native::gradient(self, spacing, dim, edge_order);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_scalararray_gradient(const at::Tensor & self, const at::Scalar & spacing, at::IntArrayRef dim, int64_t edge_order) {
  // No device check
  // DeviceGuard omitted
  return at::native::gradient(self, spacing, dim, edge_order);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_array_gradient(const at::Tensor & self, at::IntArrayRef dim, int64_t edge_order) {
  // No device check
  // DeviceGuard omitted
  return at::native::gradient(self, dim, edge_order);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_scalarrayint_gradient(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, c10::optional<int64_t> dim, int64_t edge_order) {
  // No device check
  // DeviceGuard omitted
  return at::native::gradient(self, spacing, dim, edge_order);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_scalarrayarray_gradient(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, at::IntArrayRef dim, int64_t edge_order) {
  // No device check
  // DeviceGuard omitted
  return at::native::gradient(self, spacing, dim, edge_order);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_tensorarrayint_gradient(const at::Tensor & self, at::TensorList spacing, c10::optional<int64_t> dim, int64_t edge_order) {
  // No device check
  // DeviceGuard omitted
  return at::native::gradient(self, spacing, dim, edge_order);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_tensorarray_gradient(const at::Tensor & self, at::TensorList spacing, at::IntArrayRef dim, int64_t edge_order) {
  // No device check
  // DeviceGuard omitted
  return at::native::gradient(self, spacing, dim, edge_order);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_divide(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::divide(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_divide_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::divide_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_divide_(at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::divide_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Scalar_divide(const at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::divide(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_divide_(at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::divide_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_mode_divide(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
  // No device check
  // DeviceGuard omitted
  return at::native::divide(self, other, rounding_mode);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_mode_divide_out(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::divide_out(self, other, rounding_mode, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_mode_divide_(at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
  // No device check
  // DeviceGuard omitted
  return at::native::divide_(self, other, rounding_mode);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Scalar_mode_divide(const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
  // No device check
  // DeviceGuard omitted
  return at::native::divide(self, other, rounding_mode);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_mode_divide_(at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
  // No device check
  // DeviceGuard omitted
  return at::native::divide_(self, other, rounding_mode);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_true_divide(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::true_divide(self, other);
}
} // anonymous namespace
2282namespace {
2283at::Tensor & wrapper_CompositeImplicitAutograd_out_true_divide_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
2284 // No device check
2285 // DeviceGuard omitted
2286 return at::native::true_divide_out(self, other, out);
2287}
2288} // anonymous namespace
2289namespace {
2290at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_true_divide_(at::Tensor & self, const at::Tensor & other) {
2291 // No device check
2292 // DeviceGuard omitted
2293 return at::native::true_divide_(self, other);
2294}
2295} // anonymous namespace
2296namespace {
2297at::Tensor wrapper_CompositeImplicitAutograd_Scalar_true_divide(const at::Tensor & self, const at::Scalar & other) {
2298 // No device check
2299 // DeviceGuard omitted
2300 return at::native::true_divide(self, other);
2301}
2302} // anonymous namespace
2303namespace {
2304at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_true_divide_(at::Tensor & self, const at::Scalar & other) {
2305 // No device check
2306 // DeviceGuard omitted
2307 return at::native::true_divide_(self, other);
2308}
2309} // anonymous namespace
2310namespace {
2311at::Tensor wrapper_CompositeImplicitAutograd__einsum(c10::string_view equation, at::TensorList tensors, at::OptionalIntArrayRef path) {
2312 // No device check
2313 // DeviceGuard omitted
2314 return at::native::einsum(equation, tensors, path);
2315}
2316} // anonymous namespace
2317namespace {
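// NOTE: Wrappers that take c10::SymInt / c10::SymIntArrayRef arguments forward
// to the *_symint variant of the native kernel, so symbolic shapes can flow
// through without being materialized.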
at::Tensor wrapper_CompositeImplicitAutograd__embedding_backward(const at::Tensor & grad, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) {
  // No device check
  // DeviceGuard omitted
  return at::native::embedding_backward_symint(grad, indices, num_weights, padding_idx, scale_grad_by_freq, sparse);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__embedding_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
  // No device check
  // DeviceGuard omitted
  return at::native::embedding_sparse_backward(grad, indices, num_weights, padding_idx, scale_grad_by_freq);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd___rowwise_prune(const at::Tensor & weight, const at::Tensor & mask, at::ScalarType compressed_indices_dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::_rowwise_prune(weight, mask, compressed_indices_dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__row_stack(at::TensorList tensors) {
  // No device check
  // DeviceGuard omitted
  return at::native::row_stack(tensors);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_row_stack_out(at::TensorList tensors, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::row_stack_out(tensors, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__embedding_bag(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset) {
  // No device check
  // DeviceGuard omitted
  return at::native::embedding_bag(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_padding_idx_embedding_bag(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, c10::optional<int64_t> padding_idx) {
  // No device check
  // DeviceGuard omitted
  return at::native::embedding_bag(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___embedding_bag_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
  // No device check
  // DeviceGuard omitted
  return at::native::_embedding_bag_backward_symint(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___embedding_bag_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
  // No device check
  // DeviceGuard omitted
  return at::native::_embedding_bag_sparse_backward_symint(grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_empty_out(c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
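  // C10_AS_INTARRAYREF_SLOW materializes the SymInt sizes into concrete
  // int64_t values (it errors if any size is still symbolic) so the
  // IntArrayRef-based native kernel can be called.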
  return at::native::empty_out(C10_AS_INTARRAYREF_SLOW(size), memory_format, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__expand_as(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::expand_as(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_using_ints_flatten(const at::Tensor & self, int64_t start_dim, int64_t end_dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::flatten(self, start_dim, end_dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_named_out_dim_flatten(const at::Tensor & self, int64_t start_dim, int64_t end_dim, at::Dimname out_dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::flatten(self, start_dim, end_dim, out_dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_using_names_flatten(const at::Tensor & self, at::Dimname start_dim, at::Dimname end_dim, at::Dimname out_dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::flatten(self, start_dim, end_dim, out_dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_DimnameList_flatten(const at::Tensor & self, at::DimnameList dims, at::Dimname out_dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::flatten(self, dims, out_dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_int_unflatten(const at::Tensor & self, int64_t dim, at::IntArrayRef sizes) {
  // No device check
  // DeviceGuard omitted
  return at::native::unflatten(self, dim, sizes);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Dimname_unflatten(const at::Tensor & self, at::Dimname dim, at::IntArrayRef sizes, at::DimnameList names) {
  // No device check
  // DeviceGuard omitted
  return at::native::unflatten(self, dim, sizes, names);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Scalar_floor_divide(const at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::floor_divide(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_floor_divide_(at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::floor_divide_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__grid_sampler(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
  // No device check
  // DeviceGuard omitted
  return at::native::grid_sampler(input, grid, interpolation_mode, padding_mode, align_corners);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd___grid_sampler_2d_cpu_fallback_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
  // No device check
  // DeviceGuard omitted
  return at::native::_grid_sampler_2d_cpu_fallback_backward(grad_output, input, grid, interpolation_mode, padding_mode, align_corners);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__hinge_embedding_loss(const at::Tensor & self, const at::Tensor & target, double margin, int64_t reduction) {
  // No device check
  // DeviceGuard omitted
  return at::native::hinge_embedding_loss(self, target, margin, reduction);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__group_norm(const at::Tensor & input, int64_t num_groups, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, bool cudnn_enabled) {
  // No device check
  // DeviceGuard omitted
  return at::native::group_norm(input, num_groups, weight, bias, eps, cudnn_enabled);
}
} // anonymous namespace
namespace {
int64_t wrapper_CompositeImplicitAutograd___cufft_get_plan_cache_size(int64_t device_index) {
  // No device check
  // DeviceGuard omitted
  return at::native::_cufft_get_plan_cache_size(device_index);
}
} // anonymous namespace
namespace {
int64_t wrapper_CompositeImplicitAutograd___cufft_get_plan_cache_max_size(int64_t device_index) {
  // No device check
  // DeviceGuard omitted
  return at::native::_cufft_get_plan_cache_max_size(device_index);
}
} // anonymous namespace
namespace {
void wrapper_CompositeImplicitAutograd___cufft_set_plan_cache_max_size(int64_t device_index, int64_t max_size) {
  // No device check
  // DeviceGuard omitted
  return at::native::_cufft_set_plan_cache_max_size(device_index, max_size);
}
} // anonymous namespace
namespace {
void wrapper_CompositeImplicitAutograd___cufft_clear_plan_cache(int64_t device_index) {
  // No device check
  // DeviceGuard omitted
  return at::native::_cufft_clear_plan_cache(device_index);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_dimname_index_copy_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) {
  // No device check
  // DeviceGuard omitted
  return at::native::index_copy_(self, dim, index, source);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dimname_index_copy(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) {
  // No device check
  // DeviceGuard omitted
  return at::native::index_copy(self, dim, index, source);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__instance_norm(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) {
  // No device check
  // DeviceGuard omitted
  return at::native::instance_norm(input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__isclose(const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan) {
  // No device check
  // DeviceGuard omitted
  return at::native::isclose(self, other, rtol, atol, equal_nan);
}
} // anonymous namespace
namespace {
bool wrapper_CompositeImplicitAutograd__is_distributed(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::is_distributed(self);
}
} // anonymous namespace
namespace {
bool wrapper_CompositeImplicitAutograd__is_floating_point(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::is_floating_point(self);
}
} // anonymous namespace
namespace {
bool wrapper_CompositeImplicitAutograd__is_complex(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::is_complex(self);
}
} // anonymous namespace
namespace {
bool wrapper_CompositeImplicitAutograd__is_conj(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::is_conj(self);
}
} // anonymous namespace
namespace {
bool wrapper_CompositeImplicitAutograd___is_zerotensor(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::_is_zerotensor(self);
}
} // anonymous namespace
namespace {
bool wrapper_CompositeImplicitAutograd__is_neg(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::is_neg(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__isreal(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::isreal(self);
}
} // anonymous namespace
namespace {
bool wrapper_CompositeImplicitAutograd__is_nonzero(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::is_nonzero(self);
}
} // anonymous namespace
namespace {
bool wrapper_CompositeImplicitAutograd__is_signed(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::is_signed(self);
}
} // anonymous namespace
namespace {
bool wrapper_CompositeImplicitAutograd__is_inference(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::is_inference(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__kl_div(const at::Tensor & self, const at::Tensor & target, int64_t reduction, bool log_target) {
  // No device check
  // DeviceGuard omitted
  return at::native::kl_div(self, target, reduction, log_target);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__kron(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::kron(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_kron_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::kron_out(self, other, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_dimname_kthvalue(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::kthvalue(self, k, dim, keepdim);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_dimname_out_kthvalue_out(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
  // No device check
  // DeviceGuard omitted
  return at::native::kthvalue_out(self, k, dim, keepdim, values, indices);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__layer_norm(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, bool cudnn_enable) {
  // No device check
  // DeviceGuard omitted
  return at::native::layer_norm_symint(input, normalized_shape, weight, bias, eps, cudnn_enable);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linear(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias) {
  // No device check
  // DeviceGuard omitted
  return at::native::linear(input, weight, bias);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fbgemm_linear_int8_weight_fp32_activation(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) {
  // No device check
  // DeviceGuard omitted
  return at::native::fbgemm_linear_int8_weight_fp32_activation(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fbgemm_linear_int8_weight(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) {
  // No device check
  // DeviceGuard omitted
  return at::native::fbgemm_linear_int8_weight(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor,double,int64_t> wrapper_CompositeImplicitAutograd__fbgemm_linear_quantize_weight(const at::Tensor & input) {
  // No device check
  // DeviceGuard omitted
  return at::native::fbgemm_linear_quantize_weight(input);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fbgemm_pack_gemm_matrix_fp16(const at::Tensor & input) {
  // No device check
  // DeviceGuard omitted
  return at::native::fbgemm_pack_gemm_matrix_fp16(input);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fbgemm_linear_fp16_weight_fp32_activation(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) {
  // No device check
  // DeviceGuard omitted
  return at::native::fbgemm_linear_fp16_weight_fp32_activation(input, packed_weight, bias);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fbgemm_linear_fp16_weight(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) {
  // No device check
  // DeviceGuard omitted
  return at::native::fbgemm_linear_fp16_weight(input, packed_weight, bias);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fbgemm_pack_quantized_matrix(const at::Tensor & input) {
  // No device check
  // DeviceGuard omitted
  return at::native::fbgemm_pack_quantized_matrix(input);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_KN_fbgemm_pack_quantized_matrix(const at::Tensor & input, int64_t K, int64_t N) {
  // No device check
  // DeviceGuard omitted
  return at::native::fbgemm_pack_quantized_matrix(input, K, N);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_ldexp(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::ldexp(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_ldexp_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::ldexp_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__ldexp_(at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::ldexp_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_int_log_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::log_softmax(self, dim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Dimname_log_softmax(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::log_softmax(self, dim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dimname_logcumsumexp(const at::Tensor & self, at::Dimname dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::logcumsumexp(self, dim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_dimname_out_logcumsumexp_out(const at::Tensor & self, at::Dimname dim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::logcumsumexp_out(self, dim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_names_logsumexp(const at::Tensor & self, at::DimnameList dim, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::logsumexp(self, dim, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_names_out_logsumexp_out(const at::Tensor & self, at::DimnameList dim, bool keepdim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::logsumexp_out(self, dim, keepdim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__margin_ranking_loss(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) {
  // No device check
  // DeviceGuard omitted
  return at::native::margin_ranking_loss(input1, input2, target, margin, reduction);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__matmul(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::matmul(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_matmul_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::matmul_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__matrix_power(const at::Tensor & self, int64_t n) {
  // No device check
  // DeviceGuard omitted
  return at::native::matrix_power(self, n);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_matrix_power_out(const at::Tensor & self, int64_t n, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::matrix_power_out(self, n, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__matrix_exp(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::matrix_exp(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__matrix_exp_backward(const at::Tensor & self, const at::Tensor & grad) {
  // No device check
  // DeviceGuard omitted
  return at::native::matrix_exp_backward(self, grad);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_names_dim_max(const at::Tensor & self, at::Dimname dim, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::max(self, dim, keepdim);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_names_dim_max_max_out(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) {
  // No device check
  // DeviceGuard omitted
  return at::native::max_out(self, dim, keepdim, max, max_values);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__value_selecting_reduction_backward(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, c10::SymIntArrayRef sizes, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::value_selecting_reduction_backward_symint(grad, dim, indices, sizes, keepdim);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__max_pool1d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
  // No device check
  // DeviceGuard omitted
  return at::native::max_pool1d_with_indices(self, kernel_size, stride, padding, dilation, ceil_mode);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__max_pool1d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
  // No device check
  // DeviceGuard omitted
  return at::native::max_pool1d(self, kernel_size, stride, padding, dilation, ceil_mode);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
  // No device check
  // DeviceGuard omitted
  return at::native::max_pool2d(self, kernel_size, stride, padding, dilation, ceil_mode);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
  // No device check
  // DeviceGuard omitted
  return at::native::max_pool3d(self, kernel_size, stride, padding, dilation, ceil_mode);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_names_dim_mean(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::mean(self, dim, keepdim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_names_out_mean_out(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::mean_out(self, dim, keepdim, dtype, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__nanmean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::nanmean(self, dim, keepdim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_nanmean_out(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::nanmean_out(self, dim, keepdim, dtype, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_names_dim_median(const at::Tensor & self, at::Dimname dim, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::median(self, dim, keepdim);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_names_dim_values_median_out(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
  // No device check
  // DeviceGuard omitted
  return at::native::median_out(self, dim, keepdim, values, indices);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_names_dim_nanmedian(const at::Tensor & self, at::Dimname dim, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::nanmedian(self, dim, keepdim);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_names_dim_values_nanmedian_out(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
  // No device check
  // DeviceGuard omitted
  return at::native::nanmedian_out(self, dim, keepdim, values, indices);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_names_dim_min(const at::Tensor & self, at::Dimname dim, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::min(self, dim, keepdim);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_names_dim_min_min_out(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices) {
  // No device check
  // DeviceGuard omitted
  return at::native::min_out(self, dim, keepdim, min, min_indices);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___sparse_mm(const at::Tensor & sparse, const at::Tensor & dense) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sparse_mm(sparse, dense);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_reduce__sparse_mm(const at::Tensor & sparse, const at::Tensor & dense, c10::string_view reduce) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sparse_mm(sparse, dense, reduce);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_dimname_mode(const at::Tensor & self, at::Dimname dim, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::mode(self, dim, keepdim);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_dimname_out_mode_out(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
  // No device check
  // DeviceGuard omitted
  return at::native::mode_out(self, dim, keepdim, values, indices);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_multiply(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::multiply(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_multiply_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::multiply_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_multiply_(at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::multiply_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Scalar_multiply(const at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::multiply(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_multiply_(at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::multiply_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__narrow(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
  // No device check
  // DeviceGuard omitted
  return at::native::narrow_symint(self, dim, start, length);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_narrow(const at::Tensor & self, int64_t dim, const at::Tensor & start, c10::SymInt length) {
  // No device check
  // DeviceGuard omitted
  return at::native::narrow_tensor_symint(self, dim, start, length);
}
} // anonymous namespace
namespace {
bool wrapper_CompositeImplicitAutograd__is_vulkan_available() {
  // No device check
  // DeviceGuard omitted
  return at::native::is_vulkan_available();
}
} // anonymous namespace
namespace {
bool wrapper_CompositeImplicitAutograd___nnpack_available() {
  // No device check
  // DeviceGuard omitted
  return at::native::_nnpack_available();
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__pairwise_distance(const at::Tensor & x1, const at::Tensor & x2, double p, double eps, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::pairwise_distance(x1, x2, p, eps, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__cdist(const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode) {
  // No device check
  // DeviceGuard omitted
  return at::native::cdist(x1, x2, p, compute_mode);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__pdist(const at::Tensor & self, double p) {
  // No device check
  // DeviceGuard omitted
  return at::native::pdist(self, p);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__cosine_similarity(const at::Tensor & x1, const at::Tensor & x2, int64_t dim, double eps) {
  // No device check
  // DeviceGuard omitted
  return at::native::cosine_similarity(x1, x2, dim, eps);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_intlist_movedim(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) {
  // No device check
  // DeviceGuard omitted
  return at::native::movedim(self, source, destination);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_int_movedim(const at::Tensor & self, int64_t source, int64_t destination) {
  // No device check
  // DeviceGuard omitted
  return at::native::movedim(self, source, destination);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_intlist_moveaxis(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) {
  // No device check
  // DeviceGuard omitted
  return at::native::moveaxis(self, source, destination);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_int_moveaxis(const at::Tensor & self, int64_t source, int64_t destination) {
  // No device check
  // DeviceGuard omitted
  return at::native::moveaxis(self, source, destination);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__numpy_T(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::numpy_T(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__matrix_H(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::matrix_H(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__mT(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::mT(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__mH(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::mH(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__adjoint(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::adjoint(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__native_channel_shuffle(const at::Tensor & self, int64_t groups) {
  // No device check
  // DeviceGuard omitted
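  // native_channel_shuffle has no dedicated kernel for this key; the
  // math_-prefixed composite reference implementation is used instead.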
  return at::native::math_channel_shuffle(self, groups);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__pin_memory(const at::Tensor & self, c10::optional<at::Device> device) {
  // No device check
  // DeviceGuard omitted
  return at::native::pin_memory(self, device);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__pinverse(const at::Tensor & self, double rcond) {
  // No device check
  // DeviceGuard omitted
  return at::native::pinverse(self, rcond);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__poisson_nll_loss(const at::Tensor & input, const at::Tensor & target, bool log_input, bool full, double eps, int64_t reduction) {
  // No device check
  // DeviceGuard omitted
  return at::native::poisson_nll_loss(input, target, log_input, full, eps, reduction);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_generator_out_rand_out(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::rand_out(C10_AS_INTARRAYREF_SLOW(size), generator, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_randn_out(c10::SymIntArrayRef size, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::randn_out(C10_AS_INTARRAYREF_SLOW(size), out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_generator_out_randn_out(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::randn_out(C10_AS_INTARRAYREF_SLOW(size), generator, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__ravel(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::ravel(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__negative(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::negative(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_negative_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::negative_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__negative_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::negative_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_self_Tensor_repeat_interleave(const at::Tensor & self, const at::Tensor & repeats, c10::optional<int64_t> dim, c10::optional<int64_t> output_size) {
  // No device check
  // DeviceGuard omitted
  return at::native::repeat_interleave(self, repeats, dim, output_size);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_self_int_repeat_interleave(const at::Tensor & self, c10::SymInt repeats, c10::optional<int64_t> dim, c10::optional<int64_t> output_size) {
  // No device check
  // DeviceGuard omitted
  return at::native::repeat_interleave_symint(self, repeats, dim, output_size);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__reshape(const at::Tensor & self, c10::SymIntArrayRef shape) {
  // No device check
  // DeviceGuard omitted
  return at::native::reshape_symint(self, shape);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__reshape_as(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::reshape_as(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__rrelu(const at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {
  // No device check
  // DeviceGuard omitted
  return at::native::rrelu(self, lower, upper, training, generator);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__rrelu_(at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {
  // No device check
  // DeviceGuard omitted
  return at::native::rrelu_(self, lower, upper, training, generator);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__relu6(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::relu6(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__relu6_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::relu6_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__prelu(const at::Tensor & self, const at::Tensor & weight) {
  // No device check
  // DeviceGuard omitted
  return at::native::prelu(self, weight);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__infinitely_differentiable_gelu_backward(const at::Tensor & grad, const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::infinitely_differentiable_gelu_backward(grad, self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Dimname_select(const at::Tensor & self, at::Dimname dim, int64_t index) {
  // No device check
  // DeviceGuard omitted
  return at::native::select(self, dim, index);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__selu(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::selu(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__selu_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::selu_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__silu_backward(const at::Tensor & grad_output, const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::math_silu_backward(grad_output, self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__mish_backward(const at::Tensor & grad_output, const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::math_mish_backward(grad_output, self);
}
} // anonymous namespace
namespace {
int64_t wrapper_CompositeImplicitAutograd_int_size(const at::Tensor & self, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::size(self, dim);
}
} // anonymous namespace
namespace {
int64_t wrapper_CompositeImplicitAutograd_Dimname_size(const at::Tensor & self, at::Dimname dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::size(self, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__smm(const at::Tensor & self, const at::Tensor & mat2) {
  // No device check
  // DeviceGuard omitted
  return at::native::smm(self, mat2);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_int_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::softmax(self, dim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Dimname_softmax(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::softmax(self, dim, dtype);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_sizes_split(const at::Tensor & self, c10::SymIntArrayRef split_size, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::split_symint(self, split_size, dim);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_int_hsplit(const at::Tensor & self, int64_t sections) {
  // No device check
  // DeviceGuard omitted
  return at::native::hsplit(self, sections);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_array_hsplit(const at::Tensor & self, at::IntArrayRef indices) {
  // No device check
  // DeviceGuard omitted
  return at::native::hsplit(self, indices);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_int_vsplit(const at::Tensor & self, int64_t sections) {
  // No device check
  // DeviceGuard omitted
  return at::native::vsplit(self, sections);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_array_vsplit(const at::Tensor & self, at::IntArrayRef indices) {
  // No device check
  // DeviceGuard omitted
  return at::native::vsplit(self, indices);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_int_dsplit(const at::Tensor & self, int64_t sections) {
  // No device check
  // DeviceGuard omitted
  return at::native::dsplit(self, sections);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_array_dsplit(const at::Tensor & self, at::IntArrayRef indices) {
  // No device check
  // DeviceGuard omitted
  return at::native::dsplit(self, indices);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dimname_squeeze(const at::Tensor & self, at::Dimname dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::squeeze(self, dim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_dimname_squeeze_(at::Tensor & self, at::Dimname dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::squeeze_(self, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__sspaddmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
  // No device check
  // DeviceGuard omitted
  return at::native::sspaddmm(self, mat1, mat2, beta, alpha);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__hstack(at::TensorList tensors) {
  // No device check
  // DeviceGuard omitted
  return at::native::hstack(tensors);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_hstack_out(at::TensorList tensors, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::hstack_out(tensors, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__vstack(at::TensorList tensors) {
  // No device check
  // DeviceGuard omitted
  return at::native::vstack(tensors);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_vstack_out(at::TensorList tensors, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::vstack_out(tensors, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__dstack(at::TensorList tensors) {
  // No device check
  // DeviceGuard omitted
  return at::native::dstack(tensors);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_dstack_out(at::TensorList tensors, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::dstack_out(tensors, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__stft(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool normalized, c10::optional<bool> onesided, c10::optional<bool> return_complex) {
  // No device check
  // DeviceGuard omitted
  return at::native::stft(self, n_fft, hop_length, win_length, window, normalized, onesided, return_complex);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_center_stft(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool center, c10::string_view pad_mode, bool normalized, c10::optional<bool> onesided, c10::optional<bool> return_complex) {
  // No device check
  // DeviceGuard omitted
  return at::native::stft(self, n_fft, hop_length, win_length, window, center, pad_mode, normalized, onesided, return_complex);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__istft(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool center, bool normalized, c10::optional<bool> onesided, c10::optional<int64_t> length, bool return_complex) {
  // No device check
  // DeviceGuard omitted
  return at::native::istft(self, n_fft, hop_length, win_length, window, center, normalized, onesided, length, return_complex);
}
} // anonymous namespace
namespace {
int64_t wrapper_CompositeImplicitAutograd_int_stride(const at::Tensor & self, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::stride(self, dim);
}
} // anonymous namespace
namespace {
int64_t wrapper_CompositeImplicitAutograd_Dimname_stride(const at::Tensor & self, at::Dimname dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::stride(self, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dim_DimnameList_sum(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::sum(self, dim, keepdim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_DimnameList_out_sum_out(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::sum_out(self, dim, keepdim, dtype, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__sum_to_size(const at::Tensor & self, at::IntArrayRef size) {
  // No device check
  // DeviceGuard omitted
  return at::native::sum_to_size(self, size);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__square(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::square(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_square_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::square_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__square_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::square_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__std(const at::Tensor & self, bool unbiased) {
  // No device check
  // DeviceGuard omitted
  return at::native::std(self, unbiased);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dim_std(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::std(self, dim, unbiased, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_std_out(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::std_out(self, dim, unbiased, keepdim, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__std_mean(const at::Tensor & self, bool unbiased) {
  // No device check
  // DeviceGuard omitted
  return at::native::std_mean(self, unbiased);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_dim_std_mean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::std_mean(self, dim, unbiased, keepdim);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_names_dim_std_mean(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::std_mean(self, dim, unbiased, keepdim);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_correction_names_std_mean(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::std_mean(self, dim, correction, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_names_dim_std(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::std(self, dim, unbiased, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_names_out_std_out(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out) {
3586 // No device check
3587 // DeviceGuard omitted
3588 return at::native::std_out(self, dim, unbiased, keepdim, out);
3589}
3590} // anonymous namespace
3591namespace {
3592at::Tensor wrapper_CompositeImplicitAutograd_correction_names_std(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
3593 // No device check
3594 // DeviceGuard omitted
3595 return at::native::std(self, dim, correction, keepdim);
3596}
3597} // anonymous namespace
3598namespace {
3599at::Tensor & wrapper_CompositeImplicitAutograd_correction_names_out_std_out(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) {
3600 // No device check
3601 // DeviceGuard omitted
3602 return at::native::std_out(self, dim, correction, keepdim, out);
3603}
3604} // anonymous namespace
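// NOTE on the std/std_mean overloads above: the legacy `unbiased`
// overloads and the newer `correction` overloads express the same
// statistic. The estimator divides by (N - correction), so
// unbiased=true corresponds to correction=1 (Bessel's correction) and
// unbiased=false to correction=0. That translation happens inside the
// at::native kernels these shims call, not in the generated wrappers
// themselves.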
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dim_Dimname_prod(const at::Tensor & self, at::Dimname dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::prod(self, dim, keepdim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Dimname_out_prod_out(const at::Tensor & self, at::Dimname dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::prod_out(self, dim, keepdim, dtype, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__tensordot(const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other) {
  // No device check
  // DeviceGuard omitted
  return at::native::tensordot(self, other, dims_self, dims_other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__tile(const at::Tensor & self, at::IntArrayRef dims) {
  // No device check
  // DeviceGuard omitted
  return at::native::tile(self, dims);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Dimname_transpose(const at::Tensor & self, at::Dimname dim0, at::Dimname dim1) {
  // No device check
  // DeviceGuard omitted
  return at::native::transpose(self, dim0, dim1);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__one_hot(const at::Tensor & self, int64_t num_classes) {
  // No device check
  // DeviceGuard omitted
  return at::native::one_hot(self, num_classes);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fliplr(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::fliplr(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__flipud(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::flipud(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_x_trapezoid(const at::Tensor & y, const at::Tensor & x, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::trapezoid(y, x, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dx_trapezoid(const at::Tensor & y, const at::Scalar & dx, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::trapezoid(y, dx, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_x_trapz(const at::Tensor & y, const at::Tensor & x, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::trapz(y, x, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dx_trapz(const at::Tensor & y, double dx, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::trapz(y, dx, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__triplet_margin_loss(const at::Tensor & anchor, const at::Tensor & positive, const at::Tensor & negative, double margin, double p, double eps, bool swap, int64_t reduction) {
  // No device check
  // DeviceGuard omitted
  return at::native::triplet_margin_loss(anchor, positive, negative, margin, p, eps, swap, reduction);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fix(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::fix(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_fix_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::fix_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__fix_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::fix_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__type_as(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::type_as(self, other);
}
} // anonymous namespace
namespace {
bool wrapper_CompositeImplicitAutograd___has_compatible_shallow_copy_type(const at::Tensor & self, const at::Tensor & from) {
  // No device check
  // DeviceGuard omitted
  return at::native::_has_compatible_shallow_copy_type(self, from);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__vander(const at::Tensor & x, c10::optional<int64_t> N, bool increasing) {
  // No device check
  // DeviceGuard omitted
  return at::native::vander(x, N, increasing);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__var(const at::Tensor & self, bool unbiased) {
  // No device check
  // DeviceGuard omitted
  return at::native::var(self, unbiased);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dim_var(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::var(self, dim, unbiased, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_var_out(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::var_out(self, dim, unbiased, keepdim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_names_dim_var(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::var(self, dim, unbiased, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_names_out_var_out(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::var_out(self, dim, unbiased, keepdim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_correction_names_var(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::var(self, dim, correction, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_correction_names_out_var_out(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::var_out(self, dim, correction, keepdim, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__var_mean(const at::Tensor & self, bool unbiased) {
  // No device check
  // DeviceGuard omitted
  return at::native::var_mean(self, unbiased);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_dim_var_mean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::var_mean(self, dim, unbiased, keepdim);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_names_dim_var_mean(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::var_mean(self, dim, unbiased, keepdim);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_correction_names_var_mean(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::var_mean(self, dim, correction, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__view_as(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::view_as(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_ScalarSelf_where(const at::Tensor & condition, const at::Scalar & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::where(condition, self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_ScalarOther_where(const at::Tensor & condition, const at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::where(condition, self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Scalar_where(const at::Tensor & condition, const at::Scalar & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::where(condition, self, other);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd__where(const at::Tensor & condition) {
  // No device check
  // DeviceGuard omitted
  return at::native::where(condition);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__norm_except_dim(const at::Tensor & v, int64_t pow, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::norm_except_dim(v, pow, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___weight_norm(const at::Tensor & v, const at::Tensor & g, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::_weight_norm(v, g, dim);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd___weight_norm_differentiable_backward(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::_weight_norm_differentiable_backward(grad_w, saved_v, saved_g, saved_norms, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___sparse_sum(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sparse_sum(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dtype__sparse_sum(const at::Tensor & self, at::ScalarType dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sparse_sum(self, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dim_dtype__sparse_sum(const at::Tensor & self, at::IntArrayRef dim, at::ScalarType dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sparse_sum(self, dim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_int__sparse_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sparse_softmax(self, dim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Dimname__sparse_softmax(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sparse_softmax(self, dim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_int__sparse_log_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sparse_log_softmax(self, dim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Dimname__sparse_log_softmax(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sparse_log_softmax(self, dim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_names_ScalarOpt_dim_dtype_norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::norm(self, p, dim, keepdim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_names_dtype_out_norm_out(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::norm_out(self, p, dim, keepdim, dtype, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_names_ScalarOpt_dim_norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::norm(self, p, dim, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_names_out_norm_out(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::norm_out(self, p, dim, keepdim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dim_frobenius_norm(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::frobenius_norm(self, dim, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_frobenius_norm_out(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::frobenius_norm_out(self, dim, keepdim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__nuclear_norm(const at::Tensor & self, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::nuclear_norm(self, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_nuclear_norm_out(const at::Tensor & self, bool keepdim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::nuclear_norm_out(self, keepdim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dim_nuclear_norm(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::nuclear_norm(self, dim, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_dim_out_nuclear_norm_out(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::nuclear_norm_out(self, dim, keepdim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__positive(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::positive(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_subtract(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  // No device check
  // DeviceGuard omitted
  return at::native::subtract(self, other, alpha);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_subtract_out(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::subtract_out(self, other, alpha, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_subtract_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  // No device check
  // DeviceGuard omitted
  return at::native::subtract_(self, other, alpha);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Scalar_subtract(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
  // No device check
  // DeviceGuard omitted
  return at::native::subtract(self, other, alpha);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_subtract_(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
  // No device check
  // DeviceGuard omitted
  return at::native::subtract_(self, other, alpha);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_comp_plain_value_size_sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  // No device check
  // DeviceGuard omitted
  return at::native::sparse_compressed_tensor(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_crow_col_value_size_sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  // No device check
  // DeviceGuard omitted
  return at::native::sparse_csr_tensor(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_ccol_row_value_size_sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  // No device check
  // DeviceGuard omitted
  return at::native::sparse_csc_tensor(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_crow_col_value_size_sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  // No device check
  // DeviceGuard omitted
  return at::native::sparse_bsr_tensor(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_ccol_row_value_size_sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  // No device check
  // DeviceGuard omitted
  return at::native::sparse_bsc_tensor(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_comp_plain_value_sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  // No device check
  // DeviceGuard omitted
  return at::native::sparse_compressed_tensor(compressed_indices, plain_indices, values, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_crow_col_value_sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  // No device check
  // DeviceGuard omitted
  return at::native::sparse_csr_tensor(crow_indices, col_indices, values, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_ccol_row_value_sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  // No device check
  // DeviceGuard omitted
  return at::native::sparse_csc_tensor(ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_crow_col_value_sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  // No device check
  // DeviceGuard omitted
  return at::native::sparse_bsr_tensor(crow_indices, col_indices, values, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_ccol_row_value_sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  // No device check
  // DeviceGuard omitted
  return at::native::sparse_bsc_tensor(ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sparse_compressed_tensor_unsafe(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___sparse_csr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sparse_csr_tensor_unsafe(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___sparse_csc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sparse_csc_tensor_unsafe(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___sparse_bsr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sparse_bsr_tensor_unsafe(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___sparse_bsc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sparse_bsc_tensor_unsafe(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_indices_sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  // No device check
  // DeviceGuard omitted
  return at::native::sparse_coo_tensor(indices, values, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_indices_size_sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  // No device check
  // DeviceGuard omitted
  return at::native::sparse_coo_tensor(indices, values, size, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sparse_coo_tensor_unsafe_symint(indices, values, size, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
void wrapper_CompositeImplicitAutograd___validate_sparse_coo_tensor_args(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size) {
  // No device check
  // DeviceGuard omitted
  return at::native::_validate_sparse_coo_tensor_args(indices, values, size);
}
} // anonymous namespace
namespace {
void wrapper_CompositeImplicitAutograd___validate_sparse_compressed_tensor_args(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::Layout layout) {
  // No device check
  // DeviceGuard omitted
  return at::native::_validate_sparse_compressed_tensor_args(compressed_indices, plain_indices, values, size, layout);
}
} // anonymous namespace
namespace {
void wrapper_CompositeImplicitAutograd___validate_sparse_csr_tensor_args(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) {
  // No device check
  // DeviceGuard omitted
  return at::native::_validate_sparse_csr_tensor_args(crow_indices, col_indices, values, size);
}
} // anonymous namespace
namespace {
void wrapper_CompositeImplicitAutograd___validate_sparse_csc_tensor_args(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {
  // No device check
  // DeviceGuard omitted
  return at::native::_validate_sparse_csc_tensor_args(ccol_indices, row_indices, values, size);
}
} // anonymous namespace
namespace {
void wrapper_CompositeImplicitAutograd___validate_sparse_bsr_tensor_args(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) {
  // No device check
  // DeviceGuard omitted
  return at::native::_validate_sparse_bsr_tensor_args(crow_indices, col_indices, values, size);
}
} // anonymous namespace
namespace {
void wrapper_CompositeImplicitAutograd___validate_sparse_bsc_tensor_args(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {
  // No device check
  // DeviceGuard omitted
  return at::native::_validate_sparse_bsc_tensor_args(ccol_indices, row_indices, values, size);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd___to_cpu(at::TensorList tensors) {
  // No device check
  // DeviceGuard omitted
  return at::native::_to_cpu(tensors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__to_dense(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::to_dense(self, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__to_dense_backward(const at::Tensor & grad, const at::Tensor & input) {
  // No device check
  // DeviceGuard omitted
  return at::native::to_dense_backward(grad, input);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__coalesce(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::coalesce(self);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_Dimname_unbind(const at::Tensor & self, at::Dimname dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::unbind(self, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__to_mkldnn_backward(const at::Tensor & grad, const at::Tensor & input) {
  // No device check
  // DeviceGuard omitted
  return at::native::to_mkldnn_backward(grad, input);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fake_quantize_per_tensor_affine(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
  // No device check
  // DeviceGuard omitted
  return at::native::fake_quantize_per_tensor_affine(self, scale, zero_point, quant_min, quant_max);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_tensor_qparams_fake_quantize_per_tensor_affine(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max) {
  // No device check
  // DeviceGuard omitted
  return at::native::fake_quantize_per_tensor_affine(self, scale, zero_point, quant_min, quant_max);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fake_quantize_per_tensor_affine_cachemask_backward(const at::Tensor & grad, const at::Tensor & mask) {
  // No device check
  // DeviceGuard omitted
  return at::native::fake_quantize_per_tensor_affine_cachemask_backward(grad, mask);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fake_quantize_per_channel_affine(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
  // No device check
  // DeviceGuard omitted
  return at::native::fake_quantize_per_channel_affine(self, scale, zero_point, axis, quant_min, quant_max);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fake_quantize_per_channel_affine_cachemask_backward(const at::Tensor & grad, const at::Tensor & mask) {
  // No device check
  // DeviceGuard omitted
  return at::native::fake_quantize_per_channel_affine_cachemask_backward(grad, mask);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fused_moving_avg_obs_fake_quant(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) {
  // No device check
  // DeviceGuard omitted
  return at::native::fused_moving_avg_obs_fake_quant(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
}
} // anonymous namespace
namespace {
::std::tuple<double,int64_t> wrapper_CompositeImplicitAutograd___choose_qparams_per_tensor(const at::Tensor & self, bool reduce_range) {
  // No device check
  // DeviceGuard omitted
  return at::native::_choose_qparams_per_tensor(self, reduce_range);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___saturate_weight_to_fp16(const at::Tensor & weight) {
  // No device check
  // DeviceGuard omitted
  return at::native::_saturate_weight_to_fp16(weight);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__choose_qparams_optimized(const at::Tensor & input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width) {
  // No device check
  // DeviceGuard omitted
  return at::native::choose_qparams_optimized(input, numel, n_bins, ratio, bit_width);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___autocast_to_reduced_precision(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled, at::ScalarType cuda_dtype, at::ScalarType cpu_dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::_autocast_to_reduced_precision(self, cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___autocast_to_full_precision(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled) {
  // No device check
  // DeviceGuard omitted
  return at::native::_autocast_to_full_precision(self, cuda_enabled, cpu_enabled);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dtype_layout_to(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
  // No device check
  // DeviceGuard omitted
  return at::native::to(self, dtype, layout, device, pin_memory, non_blocking, copy, memory_format);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_device_to(const at::Tensor & self, at::Device device, at::ScalarType dtype, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
  // No device check
  // DeviceGuard omitted
  return at::native::to(self, device, dtype, non_blocking, copy, memory_format);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dtype_to(const at::Tensor & self, at::ScalarType dtype, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
  // No device check
  // DeviceGuard omitted
  return at::native::to(self, dtype, non_blocking, copy, memory_format);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_other_to(const at::Tensor & self, const at::Tensor & other, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
  // No device check
  // DeviceGuard omitted
  return at::native::to(self, other, non_blocking, copy, memory_format);
}
} // anonymous namespace
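// The four `to` wrappers above back the dtype_layout, device, dtype,
// and other overloads of Tensor::to. Callers never name these wrappers
// directly; the dispatcher routes to them. A hand-written usage sketch
// (example tensors assumed, not generated output):
//
//   at::Tensor t = at::rand({2, 3});
//   at::Tensor h = t.to(at::kHalf);    // resolves to the dtype overload
//   at::Tensor u = t.to(t.options());  // resolves to the dtype_layout overload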
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd__meshgrid(at::TensorList tensors) {
  // No device check
  // DeviceGuard omitted
  return at::native::meshgrid(tensors);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_indexing_meshgrid(at::TensorList tensors, c10::string_view indexing) {
  // No device check
  // DeviceGuard omitted
  return at::native::meshgrid(tensors, indexing);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__cartesian_prod(at::TensorList tensors) {
  // No device check
  // DeviceGuard omitted
  return at::native::cartesian_prod(tensors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__combinations(const at::Tensor & self, int64_t r, bool with_replacement) {
  // No device check
  // DeviceGuard omitted
  return at::native::combinations(self, r, with_replacement);
}
} // anonymous namespace
namespace {
at::Scalar wrapper_CompositeImplicitAutograd__item(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::item(self);
}
} // anonymous namespace
namespace {
at::ScalarType wrapper_CompositeImplicitAutograd_Tensor_result_type(const at::Tensor & tensor, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::result_type(tensor, other);
}
} // anonymous namespace
namespace {
at::ScalarType wrapper_CompositeImplicitAutograd_Scalar_result_type(const at::Tensor & tensor, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::result_type(tensor, other);
}
} // anonymous namespace
namespace {
at::ScalarType wrapper_CompositeImplicitAutograd_Scalar_Tensor_result_type(const at::Scalar & scalar, const at::Tensor & tensor) {
  // No device check
  // DeviceGuard omitted
  return at::native::result_type(scalar, tensor);
}
} // anonymous namespace
namespace {
at::ScalarType wrapper_CompositeImplicitAutograd_Scalar_Scalar_result_type(const at::Scalar & scalar1, const at::Scalar & scalar2) {
  // No device check
  // DeviceGuard omitted
  return at::native::result_type(scalar1, scalar2);
}
} // anonymous namespace
namespace {
bool wrapper_CompositeImplicitAutograd__can_cast(at::ScalarType from, at::ScalarType to) {
  // No device check
  // DeviceGuard omitted
  return at::native::can_cast(from, to);
}
} // anonymous namespace
namespace {
at::ScalarType wrapper_CompositeImplicitAutograd__promote_types(at::ScalarType type1, at::ScalarType type2) {
  // No device check
  // DeviceGuard omitted
  return at::native::promote_types(type1, type2);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd___thnn_fused_lstm_cell_backward(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
  // No device check
  // DeviceGuard omitted
  return at::native::_thnn_fused_lstm_cell_backward(grad_hy, grad_cy, cx, cy, workspace, has_bias);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd___thnn_differentiable_lstm_cell_backward(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, const at::Tensor & cx, const at::Tensor & cy) {
  // No device check
  // DeviceGuard omitted
  return at::native::_thnn_differentiable_lstm_cell_backward(grad_hy, grad_cy, input_gates, hidden_gates, input_bias, hidden_bias, cx, cy);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd___thnn_differentiable_gru_cell_backward(const at::Tensor & grad_hy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias) {
  // No device check
  // DeviceGuard omitted
  return at::native::_thnn_differentiable_gru_cell_backward(grad_hy, input_gates, hidden_gates, hx, input_bias, hidden_bias);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_input_lstm(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
  // No device check
  // DeviceGuard omitted
  return at::native::lstm(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_data_lstm(const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
  // No device check
  // DeviceGuard omitted
  return at::native::lstm(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_input_gru(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
  // No device check
  // DeviceGuard omitted
  return at::native::gru(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_data_gru(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
  // No device check
  // DeviceGuard omitted
  return at::native::gru(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_input_rnn_tanh(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
  // No device check
  // DeviceGuard omitted
  return at::native::rnn_tanh(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_data_rnn_tanh(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
  // No device check
  // DeviceGuard omitted
  return at::native::rnn_tanh(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_input_rnn_relu(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
  // No device check
  // DeviceGuard omitted
  return at::native::rnn_relu(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_data_rnn_relu(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
  // No device check
  // DeviceGuard omitted
  return at::native::rnn_relu(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__lstm_cell(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
  // No device check
  // DeviceGuard omitted
  return at::native::lstm_cell(input, hx, w_ih, w_hh, b_ih, b_hh);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__gru_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
  // No device check
  // DeviceGuard omitted
  return at::native::gru_cell(input, hx, w_ih, w_hh, b_ih, b_hh);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__rnn_tanh_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
  // No device check
  // DeviceGuard omitted
  return at::native::rnn_tanh_cell(input, hx, w_ih, w_hh, b_ih, b_hh);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__rnn_relu_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
  // No device check
  // DeviceGuard omitted
  return at::native::rnn_relu_cell(input, hx, w_ih, w_hh, b_ih, b_hh);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__quantized_lstm_cell(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
  // No device check
  // DeviceGuard omitted
  return at::native::quantized_lstm_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__quantized_gru_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
  // No device check
  // DeviceGuard omitted
  return at::native::quantized_gru_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__quantized_rnn_relu_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
  // No device check
  // DeviceGuard omitted
  return at::native::quantized_rnn_relu_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__quantized_rnn_tanh_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
  // No device check
  // DeviceGuard omitted
  return at::native::quantized_rnn_tanh_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___pack_padded_sequence_backward(const at::Tensor & grad, c10::SymIntArrayRef input_size, const at::Tensor & batch_sizes, bool batch_first) {
  // No device check
  // DeviceGuard omitted
  return at::native::_pack_padded_sequence_backward_symint(grad, input_size, batch_sizes, batch_first);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd___pad_packed_sequence(const at::Tensor & data, const at::Tensor & batch_sizes, bool batch_first, const at::Scalar & padding_value, int64_t total_length) {
  // No device check
  // DeviceGuard omitted
  return at::native::_pad_packed_sequence(data, batch_sizes, batch_first, padding_value, total_length);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_source_Tensor_storage_offset_set_(at::Tensor & self, const at::Tensor & source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
  // No device check
  // DeviceGuard omitted
  return at::native::set__symint(self, source, storage_offset, size, stride);
}
} // anonymous namespace
4578namespace {
4579at::Tensor wrapper_CompositeImplicitAutograd_dimname_index_add(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
4580 // No device check
4581 // DeviceGuard omitted
4582 return at::native::index_add(self, dim, index, source, alpha);
4583}
4584} // anonymous namespace
4585namespace {
4586at::Tensor & wrapper_CompositeImplicitAutograd_Dimname_Scalar_index_fill_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
4587 // No device check
4588 // DeviceGuard omitted
4589 return at::native::index_fill_(self, dim, index, value);
4590}
4591} // anonymous namespace
4592namespace {
4593at::Tensor wrapper_CompositeImplicitAutograd_Dimname_Scalar_index_fill(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
4594 // No device check
4595 // DeviceGuard omitted
4596 return at::native::index_fill(self, dim, index, value);
4597}
4598} // anonymous namespace
4599namespace {
4600at::Tensor & wrapper_CompositeImplicitAutograd_Dimname_Tensor_index_fill_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) {
4601 // No device check
4602 // DeviceGuard omitted
4603 return at::native::index_fill_(self, dim, index, value);
4604}
4605} // anonymous namespace
4606namespace {
4607at::Tensor wrapper_CompositeImplicitAutograd_Dimname_Tensor_index_fill(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) {
4608 // No device check
4609 // DeviceGuard omitted
4610 return at::native::index_fill(self, dim, index, value);
4611}
4612} // anonymous namespace
4613namespace {
4614at::Tensor wrapper_CompositeImplicitAutograd_dimname_src_scatter(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) {
4615 // No device check
4616 // DeviceGuard omitted
4617 return at::native::scatter(self, dim, index, src);
4618}
4619} // anonymous namespace
4620namespace {
4621at::Tensor wrapper_CompositeImplicitAutograd_dimname_value_scatter(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
4622 // No device check
4623 // DeviceGuard omitted
4624 return at::native::scatter(self, dim, index, value);
4625}
4626} // anonymous namespace
4627namespace {
4628at::Tensor wrapper_CompositeImplicitAutograd_dimname_scatter_add(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) {
4629 // No device check
4630 // DeviceGuard omitted
4631 return at::native::scatter_add(self, dim, index, src);
4632}
4633} // anonymous namespace
4634namespace {
4635at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_bitwise_and_(at::Tensor & self, const at::Scalar & other) {
4636 // No device check
4637 // DeviceGuard omitted
4638 return at::native::bitwise_and_(self, other);
4639}
4640} // anonymous namespace
4641namespace {
4642at::Tensor wrapper_CompositeImplicitAutograd_Scalar___and__(const at::Tensor & self, const at::Scalar & other) {
4643 // No device check
4644 // DeviceGuard omitted
4645 return at::native::__and__(self, other);
4646}
4647} // anonymous namespace
4648namespace {
4649at::Tensor & wrapper_CompositeImplicitAutograd_Scalar___iand__(at::Tensor & self, const at::Scalar & other) {
4650 // No device check
4651 // DeviceGuard omitted
4652 return at::native::__iand__(self, other);
4653}
4654} // anonymous namespace
4655namespace {
4656at::Tensor wrapper_CompositeImplicitAutograd_Tensor___and__(const at::Tensor & self, const at::Tensor & other) {
4657 // No device check
4658 // DeviceGuard omitted
4659 return at::native::__and__(self, other);
4660}
4661} // anonymous namespace
4662namespace {
4663at::Tensor & wrapper_CompositeImplicitAutograd_Tensor___iand__(at::Tensor & self, const at::Tensor & other) {
4664 // No device check
4665 // DeviceGuard omitted
4666 return at::native::__iand__(self, other);
4667}
4668} // anonymous namespace
4669namespace {
4670at::Tensor wrapper_CompositeImplicitAutograd_Scalar_bitwise_or(const at::Tensor & self, const at::Scalar & other) {
4671 // No device check
4672 // DeviceGuard omitted
4673 return at::native::bitwise_or(self, other);
4674}
4675} // anonymous namespace
4676namespace {
4677at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_bitwise_or_(at::Tensor & self, const at::Scalar & other) {
4678 // No device check
4679 // DeviceGuard omitted
4680 return at::native::bitwise_or_(self, other);
4681}
4682} // anonymous namespace
4683namespace {
4684at::Tensor wrapper_CompositeImplicitAutograd_Scalar___or__(const at::Tensor & self, const at::Scalar & other) {
4685 // No device check
4686 // DeviceGuard omitted
4687 return at::native::__or__(self, other);
4688}
4689} // anonymous namespace
4690namespace {
4691at::Tensor & wrapper_CompositeImplicitAutograd_Scalar___ior__(at::Tensor & self, const at::Scalar & other) {
4692 // No device check
4693 // DeviceGuard omitted
4694 return at::native::__ior__(self, other);
4695}
4696} // anonymous namespace
4697namespace {
4698at::Tensor wrapper_CompositeImplicitAutograd_Tensor___or__(const at::Tensor & self, const at::Tensor & other) {
4699 // No device check
4700 // DeviceGuard omitted
4701 return at::native::__or__(self, other);
4702}
4703} // anonymous namespace
4704namespace {
4705at::Tensor & wrapper_CompositeImplicitAutograd_Tensor___ior__(at::Tensor & self, const at::Tensor & other) {
4706 // No device check
4707 // DeviceGuard omitted
4708 return at::native::__ior__(self, other);
4709}
4710} // anonymous namespace
4711namespace {
4712at::Tensor wrapper_CompositeImplicitAutograd_Scalar_bitwise_xor(const at::Tensor & self, const at::Scalar & other) {
4713 // No device check
4714 // DeviceGuard omitted
4715 return at::native::bitwise_xor(self, other);
4716}
4717} // anonymous namespace
4718namespace {
4719at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_bitwise_xor_(at::Tensor & self, const at::Scalar & other) {
4720 // No device check
4721 // DeviceGuard omitted
4722 return at::native::bitwise_xor_(self, other);
4723}
4724} // anonymous namespace
4725namespace {
4726at::Tensor wrapper_CompositeImplicitAutograd_Scalar___xor__(const at::Tensor & self, const at::Scalar & other) {
4727 // No device check
4728 // DeviceGuard omitted
4729 return at::native::__xor__(self, other);
4730}
4731} // anonymous namespace
4732namespace {
4733at::Tensor & wrapper_CompositeImplicitAutograd_Scalar___ixor__(at::Tensor & self, const at::Scalar & other) {
4734 // No device check
4735 // DeviceGuard omitted
4736 return at::native::__ixor__(self, other);
4737}
4738} // anonymous namespace
4739namespace {
4740at::Tensor wrapper_CompositeImplicitAutograd_Tensor___xor__(const at::Tensor & self, const at::Tensor & other) {
4741 // No device check
4742 // DeviceGuard omitted
4743 return at::native::__xor__(self, other);
4744}
4745} // anonymous namespace
4746namespace {
4747at::Tensor & wrapper_CompositeImplicitAutograd_Tensor___ixor__(at::Tensor & self, const at::Tensor & other) {
4748 // No device check
4749 // DeviceGuard omitted
4750 return at::native::__ixor__(self, other);
4751}
4752} // anonymous namespace
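// The `__and__` / `__iand__` / `__or__` / `__ior__` / `__xor__` / `__ixor__`
// wrappers above back the corresponding Python operators (`&`, `&=`, `|`,
// `|=`, `^`, `^=`); at::native implements them as thin aliases of
// bitwise_and / bitwise_or / bitwise_xor and their in-place variants.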
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__diag(const at::Tensor & self, int64_t diagonal) {
  // No device check
  // DeviceGuard omitted
  return at::native::diag(self, diagonal);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_diag_out(const at::Tensor & self, int64_t diagonal, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::diag_out(self, diagonal, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__cross(const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::cross(self, other, dim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_cross_out(const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::cross_out(self, other, dim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__trace_backward(const at::Tensor & grad, c10::SymIntArrayRef sizes) {
  // No device check
  // DeviceGuard omitted
  return at::native::trace_backward_symint(grad, sizes);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Scalar_not_equal(const at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::not_equal(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_out_not_equal_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::not_equal_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_not_equal_(at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::not_equal_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_not_equal(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::not_equal(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_out_not_equal_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::not_equal_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_not_equal_(at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::not_equal_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Scalar_greater_equal(const at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::greater_equal(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_out_greater_equal_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::greater_equal_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_greater_equal_(at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::greater_equal_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_greater_equal(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::greater_equal(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_out_greater_equal_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::greater_equal_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_greater_equal_(at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::greater_equal_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Scalar_less_equal(const at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::less_equal(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_out_less_equal_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::less_equal_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_less_equal_(at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::less_equal_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_less_equal(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::less_equal(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_out_less_equal_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::less_equal_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_less_equal_(at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::less_equal_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Scalar_greater(const at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::greater(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_out_greater_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::greater_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_greater_(at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::greater_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_greater(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::greater(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_out_greater_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::greater_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_greater_(at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::greater_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Scalar_less(const at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::less(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_out_less_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::less_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_less_(at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::less_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_less(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::less(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_out_less_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::less_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_less_(at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::less_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__take_along_dim(const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::take_along_dim(self, indices, dim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_take_along_dim_out(const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::take_along_dim_out(self, indices, dim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dimname_index_select(const at::Tensor & self, at::Dimname dim, const at::Tensor & index) {
  // No device check
  // DeviceGuard omitted
  return at::native::index_select(self, dim, index);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_dimname_out_index_select_out(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::index_select_out(self, dim, index, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__index_select_backward(const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index) {
  // No device check
  // DeviceGuard omitted
  return at::native::index_select_backward_symint(grad, self_sizes, dim, index);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__masked_select_backward(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & mask) {
  // No device check
  // DeviceGuard omitted
  return at::native::masked_select_backward(grad, input, mask);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd__nonzero_numpy(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::nonzero_numpy(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__argwhere(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::argwhere(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__gather_backward(const at::Tensor & grad, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
  // No device check
  // DeviceGuard omitted
  return at::native::gather_backward(grad, self, dim, index, sparse_grad);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dimname_gather(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad) {
  // No device check
  // DeviceGuard omitted
  return at::native::gather(self, dim, index, sparse_grad);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_dimname_out_gather_out(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::gather_out(self, dim, index, sparse_grad, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___gather_sparse_backward(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & grad) {
  // No device check
  // DeviceGuard omitted
  return at::native::_gather_sparse_backward(self, dim, index, grad);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__cross_entropy_loss(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, double label_smoothing) {
  // No device check
  // DeviceGuard omitted
  return at::native::cross_entropy_loss_symint(self, target, weight, reduction, ignore_index, label_smoothing);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_vander(const at::Tensor & x, c10::optional<int64_t> N) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_vander(x, N);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__svd(const at::Tensor & self, bool some, bool compute_uv) {
  // No device check
  // DeviceGuard omitted
  return at::native::svd(self, some, compute_uv);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_U_svd_out(const at::Tensor & self, bool some, bool compute_uv, at::Tensor & U, at::Tensor & S, at::Tensor & V) {
  // No device check
  // DeviceGuard omitted
  return at::native::svd_out(self, some, compute_uv, U, S, V);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__swapaxes(const at::Tensor & self, int64_t axis0, int64_t axis1) {
  // No device check
  // DeviceGuard omitted
  return at::native::swapaxes(self, axis0, axis1);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__swapaxes_(at::Tensor & self, int64_t axis0, int64_t axis1) {
  // No device check
  // DeviceGuard omitted
  return at::native::swapaxes_(self, axis0, axis1);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__swapdims(const at::Tensor & self, int64_t dim0, int64_t dim1) {
  // No device check
  // DeviceGuard omitted
  return at::native::swapdims(self, dim0, dim1);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__swapdims_(at::Tensor & self, int64_t dim0, int64_t dim1) {
  // No device check
  // DeviceGuard omitted
  return at::native::swapdims_(self, dim0, dim1);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__qr(const at::Tensor & self, bool some) {
  // No device check
  // DeviceGuard omitted
  return at::native::qr(self, some);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_Q_qr_out(const at::Tensor & self, bool some, at::Tensor & Q, at::Tensor & R) {
  // No device check
  // DeviceGuard omitted
  return at::native::qr_out(self, some, Q, R);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__orgqr(const at::Tensor & self, const at::Tensor & input2) {
  // No device check
  // DeviceGuard omitted
  return at::native::orgqr(self, input2);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_orgqr_out(const at::Tensor & self, const at::Tensor & input2, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::orgqr_out(self, input2, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd___lu_with_info(const at::Tensor & self, bool pivot, bool check_errors) {
  // No device check
  // DeviceGuard omitted
  return at::native::_lu_with_info(self, pivot, check_errors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__lu_solve(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) {
  // No device check
  // DeviceGuard omitted
  return at::native::lu_solve(self, LU_data, LU_pivots);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_lu_solve_out(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::lu_solve_out(self, LU_data, LU_pivots, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__arctan2(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::arctan2(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_arctan2_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::arctan2_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__arctan2_(at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::arctan2_(self, other);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,::std::vector<at::Tensor>> wrapper_CompositeImplicitAutograd__histogramdd(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
  // No device check
  // DeviceGuard omitted
  return at::native::histogramdd(self, bins, range, weight, density);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,::std::vector<at::Tensor>> wrapper_CompositeImplicitAutograd_int_bins_histogramdd(const at::Tensor & self, int64_t bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
  // No device check
  // DeviceGuard omitted
  return at::native::histogramdd(self, bins, range, weight, density);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,::std::vector<at::Tensor>> wrapper_CompositeImplicitAutograd_TensorList_bins_histogramdd(const at::Tensor & self, at::TensorList bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
  // No device check
  // DeviceGuard omitted
  return at::native::histogramdd(self, bins, range, weight, density);
}
} // anonymous namespace
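// The wrapper-name mangling encodes the overload name from
// native_functions.yaml: e.g. wrapper_CompositeImplicitAutograd_int_bins_histogramdd
// implements aten::histogramdd.int_bins, while a double underscore as in
// wrapper_CompositeImplicitAutograd__histogramdd marks the default (unnamed)
// overload.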
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_other_max(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::max(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_max_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::max_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_other_min(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::min(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_min_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::min_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__quantile(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
  // No device check
  // DeviceGuard omitted
  return at::native::quantile(self, q, dim, keepdim, interpolation);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_quantile_out(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::quantile_out(self, q, dim, keepdim, interpolation, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_scalar_quantile(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
  // No device check
  // DeviceGuard omitted
  return at::native::quantile(self, q, dim, keepdim, interpolation);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_scalar_out_quantile_out(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::quantile_out(self, q, dim, keepdim, interpolation, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__nanquantile(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
  // No device check
  // DeviceGuard omitted
  return at::native::nanquantile(self, q, dim, keepdim, interpolation);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_nanquantile_out(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::nanquantile_out(self, q, dim, keepdim, interpolation, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_scalar_nanquantile(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
  // No device check
  // DeviceGuard omitted
  return at::native::nanquantile(self, q, dim, keepdim, interpolation);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_scalar_out_nanquantile_out(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::nanquantile_out(self, q, dim, keepdim, interpolation, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_dimname_sort(const at::Tensor & self, at::Dimname dim, bool descending) {
  // No device check
  // DeviceGuard omitted
  return at::native::sort(self, dim, descending);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_dimname_values_sort_out(const at::Tensor & self, at::Dimname dim, bool descending, at::Tensor & values, at::Tensor & indices) {
  // No device check
  // DeviceGuard omitted
  return at::native::sort_out(self, dim, descending, values, indices);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_dimname_stable_sort(const at::Tensor & self, c10::optional<bool> stable, at::Dimname dim, bool descending) {
  // No device check
  // DeviceGuard omitted
  return at::native::sort(self, stable, dim, descending);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_dimname_values_stable_sort_out(const at::Tensor & self, c10::optional<bool> stable, at::Dimname dim, bool descending, at::Tensor & values, at::Tensor & indices) {
  // No device check
  // DeviceGuard omitted
  return at::native::sort_out(self, stable, dim, descending, values, indices);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__msort(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::msort(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_msort_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::msort_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__argsort(const at::Tensor & self, int64_t dim, bool descending) {
  // No device check
  // DeviceGuard omitted
  return at::native::argsort(self, dim, descending);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dimname_argsort(const at::Tensor & self, at::Dimname dim, bool descending) {
  // No device check
  // DeviceGuard omitted
  return at::native::argsort(self, dim, descending);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_Tensor_float_power(const at::Tensor & self, const at::Tensor & exponent) {
  // No device check
  // DeviceGuard omitted
  return at::native::float_power(self, exponent);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_Tensor_out_float_power_out(const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::float_power_out(self, exponent, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_float_power_(at::Tensor & self, const at::Tensor & exponent) {
  // No device check
  // DeviceGuard omitted
  return at::native::float_power_(self, exponent);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Scalar_float_power(const at::Scalar & self, const at::Tensor & exponent) {
  // No device check
  // DeviceGuard omitted
  return at::native::float_power(self, exponent);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_out_float_power_out(const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::float_power_out(self, exponent, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_Scalar_float_power(const at::Tensor & self, const at::Scalar & exponent) {
  // No device check
  // DeviceGuard omitted
  return at::native::float_power(self, exponent);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_Scalar_out_float_power_out(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::float_power_out(self, exponent, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_float_power_(at::Tensor & self, const at::Scalar & exponent) {
  // No device check
  // DeviceGuard omitted
  return at::native::float_power_(self, exponent);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__l1_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
  // No device check
  // DeviceGuard omitted
  return at::native::l1_loss(self, target, reduction);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__multilabel_margin_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
  // No device check
  // DeviceGuard omitted
  return at::native::multilabel_margin_loss(self, target, reduction);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_multilabel_margin_loss_out(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::multilabel_margin_loss_out(self, target, reduction, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__nll_loss(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
  // No device check
  // DeviceGuard omitted
  return at::native::nll_loss_symint(self, target, weight, reduction, ignore_index);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_nll_loss_out(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::nll_loss_out(self, target, weight, reduction, ignore_index.expect_int(), out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__nll_loss_nd(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
  // No device check
  // DeviceGuard omitted
  return at::native::nll_loss_nd_symint(self, target, weight, reduction, ignore_index);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__nll_loss2d(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
  // No device check
  // DeviceGuard omitted
  return at::native::nll_loss2d_symint(self, target, weight, reduction, ignore_index);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_nll_loss2d_out(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::nll_loss2d_out(self, target, weight, reduction, ignore_index.expect_int(), out);
}
} // anonymous namespace
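// The two out= wrappers above call ignore_index.expect_int(): the non-symint
// at::native::nll_loss_out / nll_loss2d_out signatures take a concrete
// int64_t, and expect_int() unwraps the SymInt, erroring if it actually
// holds a symbolic value rather than a plain integer.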
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__log_sigmoid(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::log_sigmoid(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_log_sigmoid_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::log_sigmoid_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__adaptive_avg_pool2d(const at::Tensor & self, c10::SymIntArrayRef output_size) {
  // No device check
  // DeviceGuard omitted
  return at::native::adaptive_avg_pool2d_symint(self, output_size);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__adaptive_avg_pool3d(const at::Tensor & self, c10::SymIntArrayRef output_size) {
  // No device check
  // DeviceGuard omitted
  return at::native::adaptive_avg_pool3d_symint(self, output_size);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___pad_circular(const at::Tensor & self, c10::SymIntArrayRef pad) {
  // No device check
  // DeviceGuard omitted
  return at::native::_pad_circular_symint(self, pad);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___pad_enum(const at::Tensor & self, c10::SymIntArrayRef pad, int64_t mode, c10::optional<double> value) {
  // No device check
  // DeviceGuard omitted
  return at::native::_pad_enum_symint(self, pad, mode, value);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__pad(const at::Tensor & self, c10::SymIntArrayRef pad, c10::string_view mode, c10::optional<double> value) {
  // No device check
  // DeviceGuard omitted
  return at::native::pad_symint(self, pad, mode, value);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_vec_upsample_linear1d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
  // No device check
  // DeviceGuard omitted
  return at::native::upsample_linear1d(input, output_size.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*output_size)) : c10::nullopt, align_corners, scale_factors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_vec_upsample_bilinear2d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
  // No device check
  // DeviceGuard omitted
  return at::native::upsample_bilinear2d(input, output_size.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*output_size)) : c10::nullopt, align_corners, scale_factors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_vec__upsample_bilinear2d_aa(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
  // No device check
  // DeviceGuard omitted
  return at::native::_upsample_bilinear2d_aa(input, output_size.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*output_size)) : c10::nullopt, align_corners, scale_factors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_vec_upsample_trilinear3d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
  // No device check
  // DeviceGuard omitted
  return at::native::upsample_trilinear3d(input, output_size.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*output_size)) : c10::nullopt, align_corners, scale_factors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_vec_upsample_bicubic2d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
  // No device check
  // DeviceGuard omitted
  return at::native::upsample_bicubic2d(input, output_size.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*output_size)) : c10::nullopt, align_corners, scale_factors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_vec__upsample_bicubic2d_aa(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
  // No device check
  // DeviceGuard omitted
  return at::native::_upsample_bicubic2d_aa(input, output_size.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*output_size)) : c10::nullopt, align_corners, scale_factors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_vec_upsample_nearest1d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
  // No device check
  // DeviceGuard omitted
  return at::native::upsample_nearest1d(input, output_size.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*output_size)) : c10::nullopt, scale_factors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_vec__upsample_nearest_exact1d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
  // No device check
  // DeviceGuard omitted
  return at::native::_upsample_nearest_exact1d(input, output_size.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*output_size)) : c10::nullopt, scale_factors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_vec_upsample_nearest2d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
  // No device check
  // DeviceGuard omitted
  return at::native::upsample_nearest2d(input, output_size.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*output_size)) : c10::nullopt, scale_factors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_vec__upsample_nearest_exact2d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
  // No device check
  // DeviceGuard omitted
  return at::native::_upsample_nearest_exact2d(input, output_size.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*output_size)) : c10::nullopt, scale_factors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_vec_upsample_nearest3d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
  // No device check
  // DeviceGuard omitted
  return at::native::upsample_nearest3d(input, output_size.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*output_size)) : c10::nullopt, scale_factors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_vec__upsample_nearest_exact3d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
  // No device check
  // DeviceGuard omitted
  return at::native::_upsample_nearest_exact3d(input, output_size.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*output_size)) : c10::nullopt, scale_factors);
}
} // anonymous namespace
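// The `vec` upsample overloads above accept at::OptionalSymIntArrayRef, but
// the native kernels they forward to still expect
// c10::optional<at::IntArrayRef>, hence the conversion in each call.
// C10_AS_INTARRAYREF_SLOW checks that every element is a concrete
// (non-symbolic) integer and materializes a plain IntArrayRef; the "SLOW" in
// the name flags that per-element check. The surrounding ?: merely preserves
// optionality, so an absent output_size stays c10::nullopt.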
5614namespace {
5615at::Tensor wrapper_CompositeImplicitAutograd__thnn_conv2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
5616 // No device check
5617 // DeviceGuard omitted
5618 return at::native::thnn_conv2d(self, weight, kernel_size, bias, stride, padding);
5619}
5620} // anonymous namespace
5621namespace {
5622at::Tensor & wrapper_CompositeImplicitAutograd_out_thnn_conv2d_out(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) {
5623 // No device check
5624 // DeviceGuard omitted
5625 return at::native::thnn_conv2d_out(self, weight, kernel_size, bias, stride, padding, out);
5626}
5627} // anonymous namespace
5628namespace {
5629at::Tensor wrapper_CompositeImplicitAutograd__slow_conv3d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding) {
5630 // No device check
5631 // DeviceGuard omitted
5632 return at::native::slow_conv3d(self, weight, kernel_size, bias, stride, C10_AS_INTARRAYREF_SLOW(padding));
5633}
5634} // anonymous namespace
5635namespace {
5636at::Tensor & wrapper_CompositeImplicitAutograd_out_slow_conv3d_out(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & out) {
5637 // No device check
5638 // DeviceGuard omitted
5639 return at::native::slow_conv3d_out(self, weight, kernel_size, bias, stride, C10_AS_INTARRAYREF_SLOW(padding), out);
5640}
5641} // anonymous namespace
5642namespace {
5643at::Tensor wrapper_CompositeImplicitAutograd__column_stack(at::TensorList tensors) {
5644 // No device check
5645 // DeviceGuard omitted
5646 return at::native::column_stack(tensors);
5647}
5648} // anonymous namespace
5649namespace {
5650at::Tensor & wrapper_CompositeImplicitAutograd_out_column_stack_out(at::TensorList tensors, at::Tensor & out) {
5651 // No device check
5652 // DeviceGuard omitted
5653 return at::native::column_stack_out(tensors, out);
5654}
5655} // anonymous namespace
5656namespace {
5657at::Tensor wrapper_CompositeImplicitAutograd__isfinite(const at::Tensor & self) {
5658 // No device check
5659 // DeviceGuard omitted
5660 return at::native::isfinite(self);
5661}
5662} // anonymous namespace
5663namespace {
5664at::Tensor wrapper_CompositeImplicitAutograd___add_batch_dim(const at::Tensor & self, int64_t batch_dim, int64_t level) {
5665 // No device check
5666 // DeviceGuard omitted
5667 return at::native::_add_batch_dim(self, batch_dim, level);
5668}
5669} // anonymous namespace
5670namespace {
5671at::Tensor wrapper_CompositeImplicitAutograd___remove_batch_dim(const at::Tensor & self, int64_t level, int64_t batch_size, int64_t out_dim) {
5672 // No device check
5673 // DeviceGuard omitted
5674 return at::native::_remove_batch_dim(self, level, batch_size, out_dim);
5675}
5676} // anonymous namespace
5677namespace {
5678at::Tensor wrapper_CompositeImplicitAutograd__special_expm1(const at::Tensor & self) {
5679 // No device check
5680 // DeviceGuard omitted
5681 return at::native::special_expm1(self);
5682}
5683} // anonymous namespace
5684namespace {
5685at::Tensor & wrapper_CompositeImplicitAutograd_out_special_expm1_out(const at::Tensor & self, at::Tensor & out) {
5686 // No device check
5687 // DeviceGuard omitted
5688 return at::native::special_expm1_out(self, out);
5689}
5690} // anonymous namespace
5691namespace {
5692at::Tensor wrapper_CompositeImplicitAutograd__special_exp2(const at::Tensor & self) {
5693 // No device check
5694 // DeviceGuard omitted
5695 return at::native::special_exp2(self);
5696}
5697} // anonymous namespace
5698namespace {
5699at::Tensor & wrapper_CompositeImplicitAutograd_out_special_exp2_out(const at::Tensor & self, at::Tensor & out) {
5700 // No device check
5701 // DeviceGuard omitted
5702 return at::native::special_exp2_out(self, out);
5703}
5704} // anonymous namespace
5705namespace {
5706at::Tensor wrapper_CompositeImplicitAutograd__special_psi(const at::Tensor & self) {
5707 // No device check
5708 // DeviceGuard omitted
5709 return at::native::special_psi(self);
5710}
5711} // anonymous namespace
5712namespace {
5713at::Tensor & wrapper_CompositeImplicitAutograd_out_special_psi_out(const at::Tensor & self, at::Tensor & out) {
5714 // No device check
5715 // DeviceGuard omitted
5716 return at::native::special_psi_out(self, out);
5717}
5718} // anonymous namespace
5719namespace {
5720at::Tensor wrapper_CompositeImplicitAutograd__special_digamma(const at::Tensor & self) {
5721 // No device check
5722 // DeviceGuard omitted
5723 return at::native::special_digamma(self);
5724}
5725} // anonymous namespace
5726namespace {
5727at::Tensor & wrapper_CompositeImplicitAutograd_out_special_digamma_out(const at::Tensor & self, at::Tensor & out) {
5728 // No device check
5729 // DeviceGuard omitted
5730 return at::native::special_digamma_out(self, out);
5731}
5732} // anonymous namespace
5733namespace {
5734at::Tensor wrapper_CompositeImplicitAutograd__special_gammaln(const at::Tensor & self) {
5735 // No device check
5736 // DeviceGuard omitted
5737 return at::native::special_gammaln(self);
5738}
5739} // anonymous namespace
5740namespace {
5741at::Tensor & wrapper_CompositeImplicitAutograd_out_special_gammaln_out(const at::Tensor & self, at::Tensor & out) {
5742 // No device check
5743 // DeviceGuard omitted
5744 return at::native::special_gammaln_out(self, out);
5745}
5746} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_erf(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_erf(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_erf_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_erf_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_erfc(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_erfc(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_erfc_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_erfc_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_erfinv(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_erfinv(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_erfinv_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_erfinv_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_ndtr(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_ndtr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_ndtr_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_ndtr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_xlogy(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_xlogy(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_xlogy_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_xlogy_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_self_scalar_special_xlogy(const at::Scalar & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_xlogy(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_self_scalar_out_special_xlogy_out(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_xlogy_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_other_scalar_special_xlogy(const at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_xlogy(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_other_scalar_out_special_xlogy_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_xlogy_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_i0(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_i0(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_i0_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_i0_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_logit(const at::Tensor & self, c10::optional<double> eps) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_logit(self, eps);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_logit_out(const at::Tensor & self, c10::optional<double> eps, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_logit_out(self, eps, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_polygamma(int64_t n, const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_polygamma(n, self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_polygamma_out(int64_t n, const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_polygamma_out(n, self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_logsumexp(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_logsumexp(self, dim, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_logsumexp_out(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_logsumexp_out(self, dim, keepdim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_expit(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_expit(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_expit_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_expit_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_sinc(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_sinc(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_sinc_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_sinc_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_round(const at::Tensor & self, int64_t decimals) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_round(self, decimals);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_round_out(const at::Tensor & self, int64_t decimals, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_round_out(self, decimals, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_log1p(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_log1p(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_log1p_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_log1p_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_log_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_log_softmax(self, dim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_gammainc(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_gammainc(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_gammainc_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_gammainc_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_gammaincc(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_gammaincc(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_gammaincc_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_gammaincc_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_multigammaln(const at::Tensor & self, int64_t p) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_multigammaln(self, p);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_multigammaln_out(const at::Tensor & self, int64_t p, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_multigammaln_out(self, p, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_softmax(self, dim, dtype);
}
} // anonymous namespace
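// The wrappers from here down cover the torch.fft namespace. Each one
// forwards to the composite implementation in ATen's spectral ops, which
// lowers the transform onto the backend _fft_c2c/_fft_r2c/_fft_c2r
// primitives; that is why these entries can stay CompositeImplicitAutograd
// and need no per-device kernel of their own.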
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_fft(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_fft(self, n, dim, norm);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_fft_out(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_fft_out(self, n, dim, norm, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_ifft(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_ifft(self, n, dim, norm);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_ifft_out(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_ifft_out(self, n, dim, norm, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_rfft(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_rfft(self, n, dim, norm);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_rfft_out(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_rfft_out(self, n, dim, norm, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_irfft(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_irfft(self, n, dim, norm);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_irfft_out(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_irfft_out(self, n, dim, norm, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_hfft(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_hfft(self, n, dim, norm);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_hfft_out(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_hfft_out(self, n, dim, norm, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_ihfft(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_ihfft(self, n, dim, norm);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_ihfft_out(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_ihfft_out(self, n, dim, norm, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_fft2(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_fft2(self, s, dim, norm);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_fft2_out(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_fft2_out(self, s, dim, norm, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_ifft2(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_ifft2(self, s, dim, norm);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_ifft2_out(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_ifft2_out(self, s, dim, norm, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_rfft2(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_rfft2(self, s, dim, norm);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_rfft2_out(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_rfft2_out(self, s, dim, norm, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_irfft2(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_irfft2(self, s, dim, norm);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_irfft2_out(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_irfft2_out(self, s, dim, norm, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_hfft2(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_hfft2(self, s, dim, norm);
}
} // anonymous namespace
namespace {
const at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_hfft2_out(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_hfft2_out(self, s, dim, norm, out);
}
} // anonymous namespace
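// NOTE: fft_hfft2 (and fft_ihfft2 / fft_hfftn / fft_ihfftn below) declare
// their out= parameter as `const at::Tensor &` rather than `at::Tensor &`.
// This matches the generated schema for these ops: the wrapper never
// rebinds `out`, it only writes through it, and mutating the underlying
// TensorImpl is permitted through a const Tensor reference because
// const-ness applies to the handle, not to the storage it points at.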
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_ihfft2(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_ihfft2(self, s, dim, norm);
}
} // anonymous namespace
namespace {
const at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_ihfft2_out(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_ihfft2_out(self, s, dim, norm, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_fftn(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_fftn(self, s, dim, norm);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_fftn_out(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_fftn_out(self, s, dim, norm, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_ifftn(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_ifftn(self, s, dim, norm);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_ifftn_out(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_ifftn_out(self, s, dim, norm, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_rfftn(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_rfftn(self, s, dim, norm);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_rfftn_out(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_rfftn_out(self, s, dim, norm, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_irfftn(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_irfftn(self, s, dim, norm);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_irfftn_out(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_irfftn_out(self, s, dim, norm, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_hfftn(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_hfftn(self, s, dim, norm);
}
} // anonymous namespace
namespace {
const at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_hfftn_out(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_hfftn_out(self, s, dim, norm, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_ihfftn(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_ihfftn(self, s, dim, norm);
}
} // anonymous namespace
namespace {
const at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_ihfftn_out(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_ihfftn_out(self, s, dim, norm, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_fftshift(const at::Tensor & self, at::OptionalIntArrayRef dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_fftshift(self, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_ifftshift(const at::Tensor & self, at::OptionalIntArrayRef dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::fft_ifftshift(self, dim);
}
} // anonymous namespace
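// torch.linalg wrappers (plus legacy aliases such as det, slogdet,
// inverse, and ger) follow. As elsewhere in this file they are thin
// forwarding shims; argument checking and the reduction to backend
// kernels live in the at::native::linalg_* implementations.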
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_cholesky(const at::Tensor & self, bool upper) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_cholesky(self, upper);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_cholesky_out(const at::Tensor & self, bool upper, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_cholesky_out(self, upper, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__linalg_lu_factor(const at::Tensor & A, bool pivot) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_lu_factor(A, pivot);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_out_linalg_lu_factor_out(const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_lu_factor_out(A, pivot, LU, pivots);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_det(const at::Tensor & A) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_det(A);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_det_out(const at::Tensor & A, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_det_out(A, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__det(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::det(self);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__linalg_ldl_factor(const at::Tensor & self, bool hermitian) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_ldl_factor(self, hermitian);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_out_linalg_ldl_factor_out(const at::Tensor & self, bool hermitian, at::Tensor & LD, at::Tensor & pivots) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_ldl_factor_out(self, hermitian, LD, pivots);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_matmul(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_matmul(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_matmul_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_matmul_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_vecdot(const at::Tensor & x, const at::Tensor & y, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_vecdot(x, y, dim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_vecdot_out(const at::Tensor & x, const at::Tensor & y, int64_t dim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_vecdot_out(x, y, dim, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__linalg_slogdet(const at::Tensor & A) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_slogdet(A);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_out_linalg_slogdet_out(const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_slogdet_out(A, sign, logabsdet);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__slogdet(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::slogdet(self);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_out_slogdet_out(const at::Tensor & self, at::Tensor & sign, at::Tensor & logabsdet) {
  // No device check
  // DeviceGuard omitted
  return at::native::slogdet_out(self, sign, logabsdet);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__logdet(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::logdet(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_eigvals(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_eigvals(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_eigvals_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_eigvals_out(self, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__linalg_eigh(const at::Tensor & self, c10::string_view UPLO) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_eigh(self, UPLO);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_eigvals_linalg_eigh_out(const at::Tensor & self, c10::string_view UPLO, at::Tensor & eigvals, at::Tensor & eigvecs) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_eigh_out(self, UPLO, eigvals, eigvecs);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_eigvalsh(const at::Tensor & self, c10::string_view UPLO) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_eigvalsh(self, UPLO);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_eigvalsh_out(const at::Tensor & self, c10::string_view UPLO, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_eigvalsh_out(self, UPLO, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_inv(const at::Tensor & A) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_inv(A);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_inv_out(const at::Tensor & A, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_inv_out(A, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__inverse(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::inverse(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_inverse_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::inverse_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__inner(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::inner(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_inner_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::inner_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__outer(const at::Tensor & self, const at::Tensor & vec2) {
  // No device check
  // DeviceGuard omitted
  return at::native::outer(self, vec2);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_outer_out(const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::outer_out(self, vec2, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__ger(const at::Tensor & self, const at::Tensor & vec2) {
  // No device check
  // DeviceGuard omitted
  return at::native::ger(self, vec2);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_ger_out(const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::ger_out(self, vec2, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_norm(const at::Tensor & self, const c10::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_norm(self, ord, dim, keepdim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_norm_out(const at::Tensor & self, const c10::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_norm_out(self, ord, dim, keepdim, dtype, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_ord_str_linalg_norm(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_norm(self, ord, dim, keepdim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_ord_str_out_linalg_norm_out(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_norm_out(self, ord, dim, keepdim, dtype, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_matrix_norm(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_matrix_norm(self, ord, dim, keepdim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_matrix_norm_out(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_matrix_norm_out(self, ord, dim, keepdim, dtype, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_str_ord_linalg_matrix_norm(const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_matrix_norm(self, ord, dim, keepdim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_str_ord_out_linalg_matrix_norm_out(const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_matrix_norm_out(self, ord, dim, keepdim, dtype, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__linalg_svd(const at::Tensor & A, bool full_matrices, c10::optional<c10::string_view> driver) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_svd(A, full_matrices, driver);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_U_linalg_svd_out(const at::Tensor & A, bool full_matrices, c10::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_svd_out(A, full_matrices, driver, U, S, Vh);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_svdvals(const at::Tensor & A, c10::optional<c10::string_view> driver) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_svdvals(A, driver);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_svdvals_out(const at::Tensor & A, c10::optional<c10::string_view> driver, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_svdvals_out(A, driver, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_cond(const at::Tensor & self, const c10::optional<at::Scalar> & p) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_cond(self, p);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_cond_out(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_cond_out(self, p, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_p_str_linalg_cond(const at::Tensor & self, c10::string_view p) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_cond(self, p);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_p_str_out_linalg_cond_out(const at::Tensor & self, c10::string_view p, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_cond_out(self, p, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_atol_rtol_float_linalg_pinv(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_pinv(self, atol, rtol, hermitian);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_atol_rtol_float_out_linalg_pinv_out(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_pinv_out(self, atol, rtol, hermitian, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_pinv(const at::Tensor & self, double rcond, bool hermitian) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_pinv(self, rcond, hermitian);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_pinv_out(const at::Tensor & self, double rcond, bool hermitian, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_pinv_out(self, rcond, hermitian, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_rcond_tensor_linalg_pinv(const at::Tensor & self, const at::Tensor & rcond, bool hermitian) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_pinv(self, rcond, hermitian);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_rcond_tensor_linalg_pinv_out(const at::Tensor & self, const at::Tensor & rcond, bool hermitian, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_pinv_out(self, rcond, hermitian, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__linalg_solve_ex(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_solve_ex(A, B, left, check_errors);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_out_linalg_solve_ex_out(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & info) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_solve_ex_out(A, B, left, check_errors, result, info);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_solve(const at::Tensor & A, const at::Tensor & B, bool left) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_solve(A, B, left);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_solve_out(const at::Tensor & A, const at::Tensor & B, bool left, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_solve_out(A, B, left, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_tensorinv(const at::Tensor & self, int64_t ind) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_tensorinv(self, ind);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_tensorinv_out(const at::Tensor & self, int64_t ind, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_tensorinv_out(self, ind, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_tensorsolve(const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_tensorsolve(self, other, dims);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_tensorsolve_out(const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_tensorsolve_out(self, other, dims, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_matrix_power(const at::Tensor & self, int64_t n) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_matrix_power(self, n);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_matrix_power_out(const at::Tensor & self, int64_t n, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_matrix_power_out(self, n, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_atol_rtol_tensor_linalg_matrix_rank(const at::Tensor & input, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_matrix_rank(input, atol, rtol, hermitian);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_atol_rtol_tensor_out_linalg_matrix_rank_out(const at::Tensor & input, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_matrix_rank_out(input, atol, rtol, hermitian, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_atol_rtol_float_linalg_matrix_rank(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_matrix_rank(self, atol, rtol, hermitian);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_atol_rtol_float_out_linalg_matrix_rank_out(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_matrix_rank_out(self, atol, rtol, hermitian, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_matrix_rank(const at::Tensor & self, double tol, bool hermitian) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_matrix_rank(self, tol, hermitian);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_matrix_rank_out(const at::Tensor & self, double tol, bool hermitian, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_matrix_rank_out(self, tol, hermitian, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_tol_tensor_linalg_matrix_rank(const at::Tensor & input, const at::Tensor & tol, bool hermitian) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_matrix_rank(input, tol, hermitian);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_tol_tensor_linalg_matrix_rank_out(const at::Tensor & input, const at::Tensor & tol, bool hermitian, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_matrix_rank_out(input, tol, hermitian, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_multi_dot(at::TensorList tensors) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_multi_dot(tensors);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_multi_dot_out(at::TensorList tensors, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_multi_dot_out(tensors, out);
}
} // anonymous namespace
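// Remaining miscellaneous entries: nested-tensor padding, the _test_*
// operators used to exercise the codegen machinery itself, sequence
// padding/flattening helpers, and the composite scaled_dot_product_attention
// entry points, which select a concrete attention kernel (or the math
// fallback) at runtime.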
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__nested_to_padded_tensor(const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size) {
  // No device check
  // DeviceGuard omitted
  return at::native::nested_to_padded_tensor(self, padding, output_size);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___test_serialization_subcmul(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  // No device check
  // DeviceGuard omitted
  return at::native::_test_serialization_subcmul(self, other, alpha);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___test_string_default(const at::Tensor & dummy, c10::string_view a, c10::string_view b) {
  // No device check
  // DeviceGuard omitted
  return at::native::_test_string_default(dummy, a, b);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_a__test_ambiguous_defaults(const at::Tensor & dummy, int64_t a, int64_t b) {
  // No device check
  // DeviceGuard omitted
  return at::native::_test_ambiguous_defaults(dummy, a, b);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_b__test_ambiguous_defaults(const at::Tensor & dummy, int64_t a, c10::string_view b) {
  // No device check
  // DeviceGuard omitted
  return at::native::_test_ambiguous_defaults(dummy, a, b);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_ntonly__test_autograd_multiple_dispatch(const at::Tensor & self, bool b) {
  // No device check
  // DeviceGuard omitted
  return at::native::_test_autograd_multiple_dispatch_ntonly(self, b);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__pad_sequence(at::TensorList sequences, bool batch_first, double padding_value) {
  // No device check
  // DeviceGuard omitted
  return at::native::pad_sequence(sequences, batch_first, padding_value);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__flatten_dense_tensors(at::TensorList tensors) {
  // No device check
  // DeviceGuard omitted
  return at::native::flatten_dense_tensors(tensors);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd__unflatten_dense_tensors(const at::Tensor & flat, at::TensorList tensors) {
  // No device check
  // DeviceGuard omitted
  return at::native::unflatten_dense_tensors(flat, tensors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__scaled_dot_product_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal) {
  // No device check
  // DeviceGuard omitted
  return at::native::scaled_dot_product_attention(query, key, value, attn_mask, dropout_p, is_causal);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd___scaled_dot_product_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool need_attn_weights, bool is_causal) {
  // No device check
  // DeviceGuard omitted
  return at::native::_scaled_dot_product_attention(query, key, value, attn_mask, dropout_p, need_attn_weights, is_causal);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd___scaled_dot_product_attention_math(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal, const c10::optional<at::Tensor> & dropout_mask) {
  // No device check
  // DeviceGuard omitted
  return at::native::_scaled_dot_product_attention_math(query, key, value, attn_mask, dropout_p, is_causal, dropout_mask);
}
} // anonymous namespace
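// Scalar/Tensor mixed overloads of the orthogonal-polynomial ops
// (Chebyshev T/U/V/W, Hermite H/He, Laguerre L, Legendre P, and the
// shifted Chebyshev variants). Only these overloads are
// CompositeImplicitAutograd: the native implementation roughly wraps the
// Scalar argument into a tensor and re-dispatches to the Tensor-Tensor
// kernel, along the lines of (sketch, not the verbatim implementation):
//
//   // special_chebyshev_polynomial_t(x_scalar, n) ~>
//   //   at::special_chebyshev_polynomial_t(<x_scalar as a wrapped tensor>, n)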
6895namespace {
6896at::Tensor wrapper_CompositeImplicitAutograd_x_scalar_special_chebyshev_polynomial_t(const at::Scalar & x, const at::Tensor & n) {
6897 // No device check
6898 // DeviceGuard omitted
6899 return at::native::special_chebyshev_polynomial_t(x, n);
6900}
6901} // anonymous namespace
6902namespace {
6903at::Tensor & wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_t_out(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
6904 // No device check
6905 // DeviceGuard omitted
6906 return at::native::special_chebyshev_polynomial_t_out(x, n, out);
6907}
6908} // anonymous namespace
6909namespace {
6910at::Tensor wrapper_CompositeImplicitAutograd_n_scalar_special_chebyshev_polynomial_t(const at::Tensor & x, const at::Scalar & n) {
6911 // No device check
6912 // DeviceGuard omitted
6913 return at::native::special_chebyshev_polynomial_t(x, n);
6914}
6915} // anonymous namespace
6916namespace {
6917at::Tensor wrapper_CompositeImplicitAutograd_x_scalar_special_chebyshev_polynomial_u(const at::Scalar & x, const at::Tensor & n) {
6918 // No device check
6919 // DeviceGuard omitted
6920 return at::native::special_chebyshev_polynomial_u(x, n);
6921}
6922} // anonymous namespace
6923namespace {
6924at::Tensor & wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_u_out(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
6925 // No device check
6926 // DeviceGuard omitted
6927 return at::native::special_chebyshev_polynomial_u_out(x, n, out);
6928}
6929} // anonymous namespace
6930namespace {
6931at::Tensor wrapper_CompositeImplicitAutograd_n_scalar_special_chebyshev_polynomial_u(const at::Tensor & x, const at::Scalar & n) {
6932 // No device check
6933 // DeviceGuard omitted
6934 return at::native::special_chebyshev_polynomial_u(x, n);
6935}
6936} // anonymous namespace
6937namespace {
6938at::Tensor wrapper_CompositeImplicitAutograd_x_scalar_special_chebyshev_polynomial_v(const at::Scalar & x, const at::Tensor & n) {
6939 // No device check
6940 // DeviceGuard omitted
6941 return at::native::special_chebyshev_polynomial_v(x, n);
6942}
6943} // anonymous namespace
6944namespace {
6945at::Tensor & wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_v_out(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
6946 // No device check
6947 // DeviceGuard omitted
6948 return at::native::special_chebyshev_polynomial_v_out(x, n, out);
6949}
6950} // anonymous namespace
6951namespace {
6952at::Tensor wrapper_CompositeImplicitAutograd_n_scalar_special_chebyshev_polynomial_v(const at::Tensor & x, const at::Scalar & n) {
6953 // No device check
6954 // DeviceGuard omitted
6955 return at::native::special_chebyshev_polynomial_v(x, n);
6956}
6957} // anonymous namespace
6958namespace {
6959at::Tensor wrapper_CompositeImplicitAutograd_x_scalar_special_chebyshev_polynomial_w(const at::Scalar & x, const at::Tensor & n) {
6960 // No device check
6961 // DeviceGuard omitted
6962 return at::native::special_chebyshev_polynomial_w(x, n);
6963}
6964} // anonymous namespace
6965namespace {
6966at::Tensor & wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_w_out(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
6967 // No device check
6968 // DeviceGuard omitted
6969 return at::native::special_chebyshev_polynomial_w_out(x, n, out);
6970}
6971} // anonymous namespace
6972namespace {
6973at::Tensor wrapper_CompositeImplicitAutograd_n_scalar_special_chebyshev_polynomial_w(const at::Tensor & x, const at::Scalar & n) {
6974 // No device check
6975 // DeviceGuard omitted
6976 return at::native::special_chebyshev_polynomial_w(x, n);
6977}
6978} // anonymous namespace
6979namespace {
6980at::Tensor wrapper_CompositeImplicitAutograd_x_scalar_special_hermite_polynomial_h(const at::Scalar & x, const at::Tensor & n) {
6981 // No device check
6982 // DeviceGuard omitted
6983 return at::native::special_hermite_polynomial_h(x, n);
6984}
6985} // anonymous namespace
6986namespace {
6987at::Tensor & wrapper_CompositeImplicitAutograd_x_scalar_out_special_hermite_polynomial_h_out(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
6988 // No device check
6989 // DeviceGuard omitted
6990 return at::native::special_hermite_polynomial_h_out(x, n, out);
6991}
6992} // anonymous namespace
6993namespace {
6994at::Tensor wrapper_CompositeImplicitAutograd_n_scalar_special_hermite_polynomial_h(const at::Tensor & x, const at::Scalar & n) {
6995 // No device check
6996 // DeviceGuard omitted
6997 return at::native::special_hermite_polynomial_h(x, n);
6998}
6999} // anonymous namespace
7000namespace {
7001at::Tensor wrapper_CompositeImplicitAutograd_x_scalar_special_hermite_polynomial_he(const at::Scalar & x, const at::Tensor & n) {
7002 // No device check
7003 // DeviceGuard omitted
7004 return at::native::special_hermite_polynomial_he(x, n);
7005}
7006} // anonymous namespace
7007namespace {
7008at::Tensor & wrapper_CompositeImplicitAutograd_x_scalar_out_special_hermite_polynomial_he_out(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
7009 // No device check
7010 // DeviceGuard omitted
7011 return at::native::special_hermite_polynomial_he_out(x, n, out);
7012}
7013} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_n_scalar_special_hermite_polynomial_he(const at::Tensor & x, const at::Scalar & n) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_hermite_polynomial_he(x, n);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_x_scalar_special_laguerre_polynomial_l(const at::Scalar & x, const at::Tensor & n) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_laguerre_polynomial_l(x, n);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_x_scalar_out_special_laguerre_polynomial_l_out(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_laguerre_polynomial_l_out(x, n, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_n_scalar_special_laguerre_polynomial_l(const at::Tensor & x, const at::Scalar & n) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_laguerre_polynomial_l(x, n);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_x_scalar_special_legendre_polynomial_p(const at::Scalar & x, const at::Tensor & n) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_legendre_polynomial_p(x, n);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_x_scalar_out_special_legendre_polynomial_p_out(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_legendre_polynomial_p_out(x, n, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_n_scalar_special_legendre_polynomial_p(const at::Tensor & x, const at::Scalar & n) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_legendre_polynomial_p(x, n);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_x_scalar_special_shifted_chebyshev_polynomial_t(const at::Scalar & x, const at::Tensor & n) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_shifted_chebyshev_polynomial_t(x, n);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_t_out(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_shifted_chebyshev_polynomial_t_out(x, n, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_n_scalar_special_shifted_chebyshev_polynomial_t(const at::Tensor & x, const at::Scalar & n) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_shifted_chebyshev_polynomial_t(x, n);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_x_scalar_special_shifted_chebyshev_polynomial_u(const at::Scalar & x, const at::Tensor & n) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_shifted_chebyshev_polynomial_u(x, n);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_u_out(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_shifted_chebyshev_polynomial_u_out(x, n, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_n_scalar_special_shifted_chebyshev_polynomial_u(const at::Tensor & x, const at::Scalar & n) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_shifted_chebyshev_polynomial_u(x, n);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_x_scalar_special_shifted_chebyshev_polynomial_v(const at::Scalar & x, const at::Tensor & n) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_shifted_chebyshev_polynomial_v(x, n);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_v_out(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_shifted_chebyshev_polynomial_v_out(x, n, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_n_scalar_special_shifted_chebyshev_polynomial_v(const at::Tensor & x, const at::Scalar & n) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_shifted_chebyshev_polynomial_v(x, n);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_x_scalar_special_shifted_chebyshev_polynomial_w(const at::Scalar & x, const at::Tensor & n) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_shifted_chebyshev_polynomial_w(x, n);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_w_out(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_shifted_chebyshev_polynomial_w_out(x, n, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_n_scalar_special_shifted_chebyshev_polynomial_w(const at::Tensor & x, const at::Scalar & n) {
  // No device check
  // DeviceGuard omitted
  return at::native::special_shifted_chebyshev_polynomial_w(x, n);
}
} // anonymous namespace
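// The block below registers each wrapper as the aten-namespace implementation
// of its operator (overload names follow the dot, e.g. "clip.Tensor") under
// the CompositeImplicitAutograd dispatch key. Kernels under this key are
// composed entirely of other operators, so they run on any backend and
// inherit autograd support from the ops they call. As a minimal sketch of the
// same mechanism for an out-of-tree extension (the `myops` namespace and the
// `mysin` op are hypothetical, not part of this file):
//
//   TORCH_LIBRARY(myops, m) {
//     m.def("mysin(Tensor self) -> Tensor");
//   }
//   TORCH_LIBRARY_IMPL(myops, CompositeImplicitAutograd, m) {
//     // Composite kernel: built from existing ops, so autograd works for free.
//     m.impl("mysin", [](const at::Tensor& self) { return at::sin(self); });
//   }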
TORCH_LIBRARY_IMPL(aten, CompositeImplicitAutograd, m) {
  m.impl("_cast_Byte", TORCH_FN(wrapper_CompositeImplicitAutograd___cast_Byte));
  m.impl("_cast_Char", TORCH_FN(wrapper_CompositeImplicitAutograd___cast_Char));
  m.impl("_cast_Double", TORCH_FN(wrapper_CompositeImplicitAutograd___cast_Double));
  m.impl("_cast_Float", TORCH_FN(wrapper_CompositeImplicitAutograd___cast_Float));
  m.impl("_cast_Int", TORCH_FN(wrapper_CompositeImplicitAutograd___cast_Int));
  m.impl("_cast_Long", TORCH_FN(wrapper_CompositeImplicitAutograd___cast_Long));
  m.impl("_cast_Short", TORCH_FN(wrapper_CompositeImplicitAutograd___cast_Short));
  m.impl("_cast_Half", TORCH_FN(wrapper_CompositeImplicitAutograd___cast_Half));
  m.impl("_backward", TORCH_FN(wrapper_CompositeImplicitAutograd___backward));
  m.impl("set_data", TORCH_FN(wrapper_CompositeImplicitAutograd__set_data));
  m.impl("data", TORCH_FN(wrapper_CompositeImplicitAutograd__data));
  m.impl("is_leaf", TORCH_FN(wrapper_CompositeImplicitAutograd__is_leaf));
  m.impl("output_nr", TORCH_FN(wrapper_CompositeImplicitAutograd__output_nr));
  m.impl("_version", TORCH_FN(wrapper_CompositeImplicitAutograd___version));
  m.impl("requires_grad_", TORCH_FN(wrapper_CompositeImplicitAutograd__requires_grad_));
  m.impl("retain_grad", TORCH_FN(wrapper_CompositeImplicitAutograd__retain_grad));
  m.impl("retains_grad", TORCH_FN(wrapper_CompositeImplicitAutograd__retains_grad));
  m.impl("_unpack_dual", TORCH_FN(wrapper_CompositeImplicitAutograd___unpack_dual));
  m.impl("rename_", TORCH_FN(wrapper_CompositeImplicitAutograd__rename_));
  m.impl("rename", TORCH_FN(wrapper_CompositeImplicitAutograd__rename));
  m.impl("align_to", TORCH_FN(wrapper_CompositeImplicitAutograd__align_to));
  m.impl("align_to.ellipsis_idx", TORCH_FN(wrapper_CompositeImplicitAutograd_ellipsis_idx_align_to));
  m.impl("align_as", TORCH_FN(wrapper_CompositeImplicitAutograd__align_as));
  m.impl("align_tensors", TORCH_FN(wrapper_CompositeImplicitAutograd__align_tensors));
  m.impl("_assert_tensor_metadata", TORCH_FN(wrapper_CompositeImplicitAutograd___assert_tensor_metadata));
  m.impl("refine_names", TORCH_FN(wrapper_CompositeImplicitAutograd__refine_names));
  m.impl("_use_cudnn_rnn_flatten_weight", TORCH_FN(wrapper_CompositeImplicitAutograd___use_cudnn_rnn_flatten_weight));
  m.impl("_debug_has_internal_overlap", TORCH_FN(wrapper_CompositeImplicitAutograd___debug_has_internal_overlap));
  m.impl("_sobol_engine_draw", TORCH_FN(wrapper_CompositeImplicitAutograd___sobol_engine_draw));
  m.impl("_sobol_engine_ff_", TORCH_FN(wrapper_CompositeImplicitAutograd___sobol_engine_ff_));
  m.impl("_sobol_engine_scramble_", TORCH_FN(wrapper_CompositeImplicitAutograd___sobol_engine_scramble_));
  m.impl("_sobol_engine_initialize_state_", TORCH_FN(wrapper_CompositeImplicitAutograd___sobol_engine_initialize_state_));
  m.impl("_reshape_from_tensor", TORCH_FN(wrapper_CompositeImplicitAutograd___reshape_from_tensor));
  m.impl("_shape_as_tensor", TORCH_FN(wrapper_CompositeImplicitAutograd___shape_as_tensor));
  m.impl("dropout", TORCH_FN(wrapper_CompositeImplicitAutograd__dropout));
  m.impl("dropout_", TORCH_FN(wrapper_CompositeImplicitAutograd__dropout_));
  m.impl("feature_dropout", TORCH_FN(wrapper_CompositeImplicitAutograd__feature_dropout));
  m.impl("feature_dropout_", TORCH_FN(wrapper_CompositeImplicitAutograd__feature_dropout_));
  m.impl("alpha_dropout", TORCH_FN(wrapper_CompositeImplicitAutograd__alpha_dropout));
  m.impl("alpha_dropout_", TORCH_FN(wrapper_CompositeImplicitAutograd__alpha_dropout_));
  m.impl("feature_alpha_dropout", TORCH_FN(wrapper_CompositeImplicitAutograd__feature_alpha_dropout));
  m.impl("feature_alpha_dropout_", TORCH_FN(wrapper_CompositeImplicitAutograd__feature_alpha_dropout_));
  m.impl("absolute", TORCH_FN(wrapper_CompositeImplicitAutograd__absolute));
  m.impl("absolute.out", TORCH_FN(wrapper_CompositeImplicitAutograd_out_absolute_out));
  m.impl("absolute_", TORCH_FN(wrapper_CompositeImplicitAutograd__absolute_));
  m.impl("chalf", TORCH_FN(wrapper_CompositeImplicitAutograd__chalf));
  m.impl("real", TORCH_FN(wrapper_CompositeImplicitAutograd__real));
  m.impl("imag", TORCH_FN(wrapper_CompositeImplicitAutograd__imag));
  m.impl("conj", TORCH_FN(wrapper_CompositeImplicitAutograd__conj));
  m.impl("conj_physical", TORCH_FN(wrapper_CompositeImplicitAutograd__conj_physical));
  m.impl("resolve_conj", TORCH_FN(wrapper_CompositeImplicitAutograd__resolve_conj));
  m.impl("resolve_neg", TORCH_FN(wrapper_CompositeImplicitAutograd__resolve_neg));
  m.impl("arccos", TORCH_FN(wrapper_CompositeImplicitAutograd__arccos));
  m.impl("arccos.out", TORCH_FN(wrapper_CompositeImplicitAutograd_out_arccos_out));
  m.impl("arccos_", TORCH_FN(wrapper_CompositeImplicitAutograd__arccos_));
  m.impl("avg_pool1d", TORCH_FN(wrapper_CompositeImplicitAutograd__avg_pool1d));
  m.impl("adaptive_avg_pool1d", TORCH_FN(wrapper_CompositeImplicitAutograd__adaptive_avg_pool1d));
  m.impl("adaptive_max_pool1d", TORCH_FN(wrapper_CompositeImplicitAutograd__adaptive_max_pool1d));
  m.impl("affine_grid_generator_backward", TORCH_FN(wrapper_CompositeImplicitAutograd__affine_grid_generator_backward));
  m.impl("_test_check_tensor", TORCH_FN(wrapper_CompositeImplicitAutograd___test_check_tensor));
  m.impl("all.dimname", TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_all));
  m.impl("all.dimname_out", TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_out_all_out));
  m.impl("any.dimname", TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_any));
  m.impl("any.dimname_out", TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_out_any_out));
  m.impl("_dim_arange", TORCH_FN(wrapper_CompositeImplicitAutograd___dim_arange));
  m.impl("arccosh", TORCH_FN(wrapper_CompositeImplicitAutograd__arccosh));
  m.impl("arccosh.out", TORCH_FN(wrapper_CompositeImplicitAutograd_out_arccosh_out));
  m.impl("arccosh_", TORCH_FN(wrapper_CompositeImplicitAutograd__arccosh_));
  m.impl("arcsinh", TORCH_FN(wrapper_CompositeImplicitAutograd__arcsinh));
  m.impl("arcsinh.out", TORCH_FN(wrapper_CompositeImplicitAutograd_out_arcsinh_out));
  m.impl("arcsinh_", TORCH_FN(wrapper_CompositeImplicitAutograd__arcsinh_));
  m.impl("arctanh", TORCH_FN(wrapper_CompositeImplicitAutograd__arctanh));
  m.impl("arctanh.out", TORCH_FN(wrapper_CompositeImplicitAutograd_out_arctanh_out));
  m.impl("arctanh_", TORCH_FN(wrapper_CompositeImplicitAutograd__arctanh_));
  m.impl("arcsin", TORCH_FN(wrapper_CompositeImplicitAutograd__arcsin));
  m.impl("arcsin.out", TORCH_FN(wrapper_CompositeImplicitAutograd_out_arcsin_out));
  m.impl("arcsin_", TORCH_FN(wrapper_CompositeImplicitAutograd__arcsin_));
  m.impl("arctan", TORCH_FN(wrapper_CompositeImplicitAutograd__arctan));
  m.impl("arctan.out", TORCH_FN(wrapper_CompositeImplicitAutograd_out_arctan_out));
  m.impl("arctan_", TORCH_FN(wrapper_CompositeImplicitAutograd__arctan_));
  m.impl("atleast_1d", TORCH_FN(wrapper_CompositeImplicitAutograd__atleast_1d));
  m.impl("atleast_1d.Sequence", TORCH_FN(wrapper_CompositeImplicitAutograd_Sequence_atleast_1d));
  m.impl("atleast_2d", TORCH_FN(wrapper_CompositeImplicitAutograd__atleast_2d));
  m.impl("atleast_2d.Sequence", TORCH_FN(wrapper_CompositeImplicitAutograd_Sequence_atleast_2d));
  m.impl("atleast_3d", TORCH_FN(wrapper_CompositeImplicitAutograd__atleast_3d));
  m.impl("atleast_3d.Sequence", TORCH_FN(wrapper_CompositeImplicitAutograd_Sequence_atleast_3d));
  m.impl("batch_norm", TORCH_FN(wrapper_CompositeImplicitAutograd__batch_norm));
  m.impl("_batch_norm_impl_index", TORCH_FN(wrapper_CompositeImplicitAutograd___batch_norm_impl_index));
  m.impl("_batch_norm_impl_index_backward", TORCH_FN(wrapper_CompositeImplicitAutograd___batch_norm_impl_index_backward));
  m.impl("bilinear", TORCH_FN(wrapper_CompositeImplicitAutograd__bilinear));
  m.impl("broadcast_tensors", TORCH_FN(wrapper_CompositeImplicitAutograd__broadcast_tensors));
  m.impl("broadcast_to", TORCH_FN(wrapper_CompositeImplicitAutograd__broadcast_to));
  m.impl("cat.names", TORCH_FN(wrapper_CompositeImplicitAutograd_names_cat));
  m.impl("cat.names_out", TORCH_FN(wrapper_CompositeImplicitAutograd_names_out_cat_out));
  m.impl("concat", TORCH_FN(wrapper_CompositeImplicitAutograd__concat));
  m.impl("concat.out", TORCH_FN(wrapper_CompositeImplicitAutograd_out_concat_out));
  m.impl("concat.names", TORCH_FN(wrapper_CompositeImplicitAutograd_names_concat));
  m.impl("concat.names_out", TORCH_FN(wrapper_CompositeImplicitAutograd_names_out_concat_out));
  m.impl("concatenate", TORCH_FN(wrapper_CompositeImplicitAutograd__concatenate));
  m.impl("concatenate.out", TORCH_FN(wrapper_CompositeImplicitAutograd_out_concatenate_out));
  m.impl("concatenate.names", TORCH_FN(wrapper_CompositeImplicitAutograd_names_concatenate));
  m.impl("concatenate.names_out", TORCH_FN(wrapper_CompositeImplicitAutograd_names_out_concatenate_out));
  m.impl("chain_matmul", TORCH_FN(wrapper_CompositeImplicitAutograd__chain_matmul));
  m.impl("chain_matmul.out", TORCH_FN(wrapper_CompositeImplicitAutograd_out_chain_matmul_out));
  m.impl("unsafe_chunk", TORCH_FN(wrapper_CompositeImplicitAutograd__unsafe_chunk));
  m.impl("chunk", TORCH_FN(wrapper_CompositeImplicitAutograd__chunk));
  m.impl("tensor_split.sections", TORCH_FN(wrapper_CompositeImplicitAutograd_sections_tensor_split));
  m.impl("tensor_split.indices", TORCH_FN(wrapper_CompositeImplicitAutograd_indices_tensor_split));
  m.impl("tensor_split.tensor_indices_or_sections", TORCH_FN(wrapper_CompositeImplicitAutograd_tensor_indices_or_sections_tensor_split));
  m.impl("clip", TORCH_FN(wrapper_CompositeImplicitAutograd__clip));
  m.impl("clip.out", TORCH_FN(wrapper_CompositeImplicitAutograd_out_clip_out));
  m.impl("clip_", TORCH_FN(wrapper_CompositeImplicitAutograd__clip_));
  m.impl("clip.Tensor", TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_clip));
  m.impl("clip.Tensor_out", TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_out_clip_out));
  m.impl("clip_.Tensor", TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_clip_));
  m.impl("cudnn_is_acceptable", TORCH_FN(wrapper_CompositeImplicitAutograd__cudnn_is_acceptable));
  m.impl("contiguous", TORCH_FN(wrapper_CompositeImplicitAutograd__contiguous));
  m.impl("_convolution.deprecated", TORCH_FN(wrapper_CompositeImplicitAutograd_deprecated__convolution));
  m.impl("_convolution_mode", TORCH_FN(wrapper_CompositeImplicitAutograd___convolution_mode));
  m.impl("_convolution_double_backward", TORCH_FN(wrapper_CompositeImplicitAutograd___convolution_double_backward));
  m.impl("conv1d", TORCH_FN(wrapper_CompositeImplicitAutograd__conv1d));
  m.impl("conv2d", TORCH_FN(wrapper_CompositeImplicitAutograd__conv2d));
  m.impl("conv3d", TORCH_FN(wrapper_CompositeImplicitAutograd__conv3d));
  m.impl("conv1d.padding", TORCH_FN(wrapper_CompositeImplicitAutograd_padding_conv1d));
  m.impl("conv2d.padding", TORCH_FN(wrapper_CompositeImplicitAutograd_padding_conv2d));
  m.impl("conv3d.padding", TORCH_FN(wrapper_CompositeImplicitAutograd_padding_conv3d));
7400m.impl("conv_tbc_backward",
7401TORCH_FN(wrapper_CompositeImplicitAutograd__conv_tbc_backward));
7402m.impl("conv_transpose1d",
7403TORCH_FN(wrapper_CompositeImplicitAutograd__conv_transpose1d));
7404m.impl("conv_transpose2d.input",
7405TORCH_FN(wrapper_CompositeImplicitAutograd_input_conv_transpose2d));
7406m.impl("conv_transpose3d.input",
7407TORCH_FN(wrapper_CompositeImplicitAutograd_input_conv_transpose3d));
7408m.impl("cosine_embedding_loss",
7409TORCH_FN(wrapper_CompositeImplicitAutograd__cosine_embedding_loss));
7410m.impl("cov",
7411TORCH_FN(wrapper_CompositeImplicitAutograd__cov));
7412m.impl("corrcoef",
7413TORCH_FN(wrapper_CompositeImplicitAutograd__corrcoef));
7414m.impl("cummax.dimname",
7415TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_cummax));
7416m.impl("cummax.dimname_out",
7417TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_out_cummax_out));
7418m.impl("cummin.dimname",
7419TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_cummin));
7420m.impl("cummin.dimname_out",
7421TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_out_cummin_out));
7422m.impl("cummaxmin_backward",
7423TORCH_FN(wrapper_CompositeImplicitAutograd__cummaxmin_backward));
7424m.impl("cumprod.dimname",
7425TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_cumprod));
7426m.impl("cumprod.dimname_out",
7427TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_out_cumprod_out));
7428m.impl("cumprod_.dimname",
7429TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_cumprod_));
7430m.impl("cumprod_backward",
7431TORCH_FN(wrapper_CompositeImplicitAutograd__cumprod_backward));
7432m.impl("cumsum.dimname",
7433TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_cumsum));
7434m.impl("cumsum.dimname_out",
7435TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_out_cumsum_out));
7436m.impl("cumsum_.dimname",
7437TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_cumsum_));
7438m.impl("cumulative_trapezoid.x",
7439TORCH_FN(wrapper_CompositeImplicitAutograd_x_cumulative_trapezoid));
7440m.impl("cumulative_trapezoid.dx",
7441TORCH_FN(wrapper_CompositeImplicitAutograd_dx_cumulative_trapezoid));
7442m.impl("ctc_loss.IntList",
7443TORCH_FN(wrapper_CompositeImplicitAutograd_IntList_ctc_loss));
7444m.impl("ctc_loss.Tensor",
7445TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_ctc_loss));
7446m.impl("diagflat",
7447TORCH_FN(wrapper_CompositeImplicitAutograd__diagflat));
7448m.impl("linalg_diagonal",
7449TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_diagonal));
7450m.impl("diagonal.Dimname",
7451TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname_diagonal));
7452m.impl("fill_diagonal_",
7453TORCH_FN(wrapper_CompositeImplicitAutograd__fill_diagonal_));
7454m.impl("diff",
7455TORCH_FN(wrapper_CompositeImplicitAutograd__diff));
7456m.impl("diff.out",
7457TORCH_FN(wrapper_CompositeImplicitAutograd_out_diff_out));
7458m.impl("gradient.scalarint",
7459TORCH_FN(wrapper_CompositeImplicitAutograd_scalarint_gradient));
7460m.impl("gradient.scalararray",
7461TORCH_FN(wrapper_CompositeImplicitAutograd_scalararray_gradient));
7462m.impl("gradient.array",
7463TORCH_FN(wrapper_CompositeImplicitAutograd_array_gradient));
7464m.impl("gradient.scalarrayint",
7465TORCH_FN(wrapper_CompositeImplicitAutograd_scalarrayint_gradient));
7466m.impl("gradient.scalarrayarray",
7467TORCH_FN(wrapper_CompositeImplicitAutograd_scalarrayarray_gradient));
7468m.impl("gradient.tensorarrayint",
7469TORCH_FN(wrapper_CompositeImplicitAutograd_tensorarrayint_gradient));
7470m.impl("gradient.tensorarray",
7471TORCH_FN(wrapper_CompositeImplicitAutograd_tensorarray_gradient));
7472m.impl("divide.Tensor",
7473TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_divide));
7474m.impl("divide.out",
7475TORCH_FN(wrapper_CompositeImplicitAutograd_out_divide_out));
7476m.impl("divide_.Tensor",
7477TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_divide_));
7478m.impl("divide.Scalar",
7479TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_divide));
7480m.impl("divide_.Scalar",
7481TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_divide_));
7482m.impl("divide.Tensor_mode",
7483TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_mode_divide));
7484m.impl("divide.out_mode",
7485TORCH_FN(wrapper_CompositeImplicitAutograd_out_mode_divide_out));
7486m.impl("divide_.Tensor_mode",
7487TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_mode_divide_));
7488m.impl("divide.Scalar_mode",
7489TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_mode_divide));
7490m.impl("divide_.Scalar_mode",
7491TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_mode_divide_));
7492m.impl("true_divide.Tensor",
7493TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_true_divide));
7494m.impl("true_divide.out",
7495TORCH_FN(wrapper_CompositeImplicitAutograd_out_true_divide_out));
7496m.impl("true_divide_.Tensor",
7497TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_true_divide_));
7498m.impl("true_divide.Scalar",
7499TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_true_divide));
7500m.impl("true_divide_.Scalar",
7501TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_true_divide_));
7502m.impl("einsum",
7503TORCH_FN(wrapper_CompositeImplicitAutograd__einsum));
7504m.impl("embedding_backward",
7505TORCH_FN(wrapper_CompositeImplicitAutograd__embedding_backward));
7506m.impl("embedding_sparse_backward",
7507TORCH_FN(wrapper_CompositeImplicitAutograd__embedding_sparse_backward));
7508m.impl("_rowwise_prune",
7509TORCH_FN(wrapper_CompositeImplicitAutograd___rowwise_prune));
7510m.impl("row_stack",
7511TORCH_FN(wrapper_CompositeImplicitAutograd__row_stack));
7512m.impl("row_stack.out",
7513TORCH_FN(wrapper_CompositeImplicitAutograd_out_row_stack_out));
7514m.impl("embedding_bag",
7515TORCH_FN(wrapper_CompositeImplicitAutograd__embedding_bag));
7516m.impl("embedding_bag.padding_idx",
7517TORCH_FN(wrapper_CompositeImplicitAutograd_padding_idx_embedding_bag));
7518m.impl("_embedding_bag_backward",
7519TORCH_FN(wrapper_CompositeImplicitAutograd___embedding_bag_backward));
7520m.impl("_embedding_bag_sparse_backward",
7521TORCH_FN(wrapper_CompositeImplicitAutograd___embedding_bag_sparse_backward));
7522m.impl("empty.out",
7523TORCH_FN(wrapper_CompositeImplicitAutograd_out_empty_out));
7524m.impl("expand_as",
7525TORCH_FN(wrapper_CompositeImplicitAutograd__expand_as));
7526m.impl("flatten.using_ints",
7527TORCH_FN(wrapper_CompositeImplicitAutograd_using_ints_flatten));
7528m.impl("flatten.named_out_dim",
7529TORCH_FN(wrapper_CompositeImplicitAutograd_named_out_dim_flatten));
7530m.impl("flatten.using_names",
7531TORCH_FN(wrapper_CompositeImplicitAutograd_using_names_flatten));
7532m.impl("flatten.DimnameList",
7533TORCH_FN(wrapper_CompositeImplicitAutograd_DimnameList_flatten));
7534m.impl("unflatten.int",
7535TORCH_FN(wrapper_CompositeImplicitAutograd_int_unflatten));
7536m.impl("unflatten.Dimname",
7537TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname_unflatten));
7538m.impl("floor_divide.Scalar",
7539TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_floor_divide));
7540m.impl("floor_divide_.Scalar",
7541TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_floor_divide_));
7542m.impl("grid_sampler",
7543TORCH_FN(wrapper_CompositeImplicitAutograd__grid_sampler));
7544m.impl("_grid_sampler_2d_cpu_fallback_backward",
7545TORCH_FN(wrapper_CompositeImplicitAutograd___grid_sampler_2d_cpu_fallback_backward));
7546m.impl("hinge_embedding_loss",
7547TORCH_FN(wrapper_CompositeImplicitAutograd__hinge_embedding_loss));
7548m.impl("group_norm",
7549TORCH_FN(wrapper_CompositeImplicitAutograd__group_norm));
7550m.impl("_cufft_get_plan_cache_size",
7551TORCH_FN(wrapper_CompositeImplicitAutograd___cufft_get_plan_cache_size));
7552m.impl("_cufft_get_plan_cache_max_size",
7553TORCH_FN(wrapper_CompositeImplicitAutograd___cufft_get_plan_cache_max_size));
7554m.impl("_cufft_set_plan_cache_max_size",
7555TORCH_FN(wrapper_CompositeImplicitAutograd___cufft_set_plan_cache_max_size));
7556m.impl("_cufft_clear_plan_cache",
7557TORCH_FN(wrapper_CompositeImplicitAutograd___cufft_clear_plan_cache));
7558m.impl("index_copy_.dimname",
7559TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_index_copy_));
7560m.impl("index_copy.dimname",
7561TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_index_copy));
7562m.impl("instance_norm",
7563TORCH_FN(wrapper_CompositeImplicitAutograd__instance_norm));
7564m.impl("isclose",
7565TORCH_FN(wrapper_CompositeImplicitAutograd__isclose));
7566m.impl("is_distributed",
7567TORCH_FN(wrapper_CompositeImplicitAutograd__is_distributed));
7568m.impl("is_floating_point",
7569TORCH_FN(wrapper_CompositeImplicitAutograd__is_floating_point));
7570m.impl("is_complex",
7571TORCH_FN(wrapper_CompositeImplicitAutograd__is_complex));
7572m.impl("is_conj",
7573TORCH_FN(wrapper_CompositeImplicitAutograd__is_conj));
7574m.impl("_is_zerotensor",
7575TORCH_FN(wrapper_CompositeImplicitAutograd___is_zerotensor));
7576m.impl("is_neg",
7577TORCH_FN(wrapper_CompositeImplicitAutograd__is_neg));
7578m.impl("isreal",
7579TORCH_FN(wrapper_CompositeImplicitAutograd__isreal));
7580m.impl("is_nonzero",
7581TORCH_FN(wrapper_CompositeImplicitAutograd__is_nonzero));
7582m.impl("is_signed",
7583TORCH_FN(wrapper_CompositeImplicitAutograd__is_signed));
7584m.impl("is_inference",
7585TORCH_FN(wrapper_CompositeImplicitAutograd__is_inference));
7586m.impl("kl_div",
7587TORCH_FN(wrapper_CompositeImplicitAutograd__kl_div));
7588m.impl("kron",
7589TORCH_FN(wrapper_CompositeImplicitAutograd__kron));
7590m.impl("kron.out",
7591TORCH_FN(wrapper_CompositeImplicitAutograd_out_kron_out));
7592m.impl("kthvalue.dimname",
7593TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_kthvalue));
7594m.impl("kthvalue.dimname_out",
7595TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_out_kthvalue_out));
7596m.impl("layer_norm",
7597TORCH_FN(wrapper_CompositeImplicitAutograd__layer_norm));
7598m.impl("linear",
7599TORCH_FN(wrapper_CompositeImplicitAutograd__linear));
7600m.impl("fbgemm_linear_int8_weight_fp32_activation",
7601TORCH_FN(wrapper_CompositeImplicitAutograd__fbgemm_linear_int8_weight_fp32_activation));
7602m.impl("fbgemm_linear_int8_weight",
7603TORCH_FN(wrapper_CompositeImplicitAutograd__fbgemm_linear_int8_weight));
7604m.impl("fbgemm_linear_quantize_weight",
7605TORCH_FN(wrapper_CompositeImplicitAutograd__fbgemm_linear_quantize_weight));
7606m.impl("fbgemm_pack_gemm_matrix_fp16",
7607TORCH_FN(wrapper_CompositeImplicitAutograd__fbgemm_pack_gemm_matrix_fp16));
7608m.impl("fbgemm_linear_fp16_weight_fp32_activation",
7609TORCH_FN(wrapper_CompositeImplicitAutograd__fbgemm_linear_fp16_weight_fp32_activation));
7610m.impl("fbgemm_linear_fp16_weight",
7611TORCH_FN(wrapper_CompositeImplicitAutograd__fbgemm_linear_fp16_weight));
7612m.impl("fbgemm_pack_quantized_matrix",
7613TORCH_FN(wrapper_CompositeImplicitAutograd__fbgemm_pack_quantized_matrix));
7614m.impl("fbgemm_pack_quantized_matrix.KN",
7615TORCH_FN(wrapper_CompositeImplicitAutograd_KN_fbgemm_pack_quantized_matrix));
7616m.impl("ldexp.Tensor",
7617TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_ldexp));
7618m.impl("ldexp.out",
7619TORCH_FN(wrapper_CompositeImplicitAutograd_out_ldexp_out));
7620m.impl("ldexp_",
7621TORCH_FN(wrapper_CompositeImplicitAutograd__ldexp_));
7622m.impl("log_softmax.int",
7623TORCH_FN(wrapper_CompositeImplicitAutograd_int_log_softmax));
7624m.impl("log_softmax.Dimname",
7625TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname_log_softmax));
7626m.impl("logcumsumexp.dimname",
7627TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_logcumsumexp));
7628m.impl("logcumsumexp.dimname_out",
7629TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_out_logcumsumexp_out));
7630m.impl("logsumexp.names",
7631TORCH_FN(wrapper_CompositeImplicitAutograd_names_logsumexp));
7632m.impl("logsumexp.names_out",
7633TORCH_FN(wrapper_CompositeImplicitAutograd_names_out_logsumexp_out));
7634m.impl("margin_ranking_loss",
7635TORCH_FN(wrapper_CompositeImplicitAutograd__margin_ranking_loss));
7636m.impl("matmul",
7637TORCH_FN(wrapper_CompositeImplicitAutograd__matmul));
7638m.impl("matmul.out",
7639TORCH_FN(wrapper_CompositeImplicitAutograd_out_matmul_out));
7640m.impl("matrix_power",
7641TORCH_FN(wrapper_CompositeImplicitAutograd__matrix_power));
7642m.impl("matrix_power.out",
7643TORCH_FN(wrapper_CompositeImplicitAutograd_out_matrix_power_out));
7644m.impl("matrix_exp",
7645TORCH_FN(wrapper_CompositeImplicitAutograd__matrix_exp));
7646m.impl("matrix_exp_backward",
7647TORCH_FN(wrapper_CompositeImplicitAutograd__matrix_exp_backward));
7648m.impl("max.names_dim",
7649TORCH_FN(wrapper_CompositeImplicitAutograd_names_dim_max));
7650m.impl("max.names_dim_max",
7651TORCH_FN(wrapper_CompositeImplicitAutograd_names_dim_max_max_out));
7652m.impl("value_selecting_reduction_backward",
7653TORCH_FN(wrapper_CompositeImplicitAutograd__value_selecting_reduction_backward));
7654m.impl("max_pool1d_with_indices",
7655TORCH_FN(wrapper_CompositeImplicitAutograd__max_pool1d_with_indices));
7656m.impl("max_pool1d",
7657TORCH_FN(wrapper_CompositeImplicitAutograd__max_pool1d));
7658m.impl("max_pool2d",
7659TORCH_FN(wrapper_CompositeImplicitAutograd__max_pool2d));
7660m.impl("max_pool3d",
7661TORCH_FN(wrapper_CompositeImplicitAutograd__max_pool3d));
7662m.impl("mean.names_dim",
7663TORCH_FN(wrapper_CompositeImplicitAutograd_names_dim_mean));
7664m.impl("mean.names_out",
7665TORCH_FN(wrapper_CompositeImplicitAutograd_names_out_mean_out));
7666m.impl("nanmean",
7667TORCH_FN(wrapper_CompositeImplicitAutograd__nanmean));
7668m.impl("nanmean.out",
7669TORCH_FN(wrapper_CompositeImplicitAutograd_out_nanmean_out));
7670m.impl("median.names_dim",
7671TORCH_FN(wrapper_CompositeImplicitAutograd_names_dim_median));
7672m.impl("median.names_dim_values",
7673TORCH_FN(wrapper_CompositeImplicitAutograd_names_dim_values_median_out));
7674m.impl("nanmedian.names_dim",
7675TORCH_FN(wrapper_CompositeImplicitAutograd_names_dim_nanmedian));
7676m.impl("nanmedian.names_dim_values",
7677TORCH_FN(wrapper_CompositeImplicitAutograd_names_dim_values_nanmedian_out));
7678m.impl("min.names_dim",
7679TORCH_FN(wrapper_CompositeImplicitAutograd_names_dim_min));
7680m.impl("min.names_dim_min",
7681TORCH_FN(wrapper_CompositeImplicitAutograd_names_dim_min_min_out));
7682m.impl("_sparse_mm",
7683TORCH_FN(wrapper_CompositeImplicitAutograd___sparse_mm));
7684m.impl("_sparse_mm.reduce",
7685TORCH_FN(wrapper_CompositeImplicitAutograd_reduce__sparse_mm));
7686m.impl("mode.dimname",
7687TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_mode));
7688m.impl("mode.dimname_out",
7689TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_out_mode_out));
7690m.impl("multiply.Tensor",
7691TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_multiply));
7692m.impl("multiply.out",
7693TORCH_FN(wrapper_CompositeImplicitAutograd_out_multiply_out));
7694m.impl("multiply_.Tensor",
7695TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_multiply_));
7696m.impl("multiply.Scalar",
7697TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_multiply));
7698m.impl("multiply_.Scalar",
7699TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_multiply_));
7700m.impl("narrow",
7701TORCH_FN(wrapper_CompositeImplicitAutograd__narrow));
7702m.impl("narrow.Tensor",
7703TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_narrow));
7704m.impl("is_vulkan_available",
7705TORCH_FN(wrapper_CompositeImplicitAutograd__is_vulkan_available));
7706m.impl("_nnpack_available",
7707TORCH_FN(wrapper_CompositeImplicitAutograd___nnpack_available));
7708m.impl("pairwise_distance",
7709TORCH_FN(wrapper_CompositeImplicitAutograd__pairwise_distance));
7710m.impl("cdist",
7711TORCH_FN(wrapper_CompositeImplicitAutograd__cdist));
7712m.impl("pdist",
7713TORCH_FN(wrapper_CompositeImplicitAutograd__pdist));
7714m.impl("cosine_similarity",
7715TORCH_FN(wrapper_CompositeImplicitAutograd__cosine_similarity));
7716m.impl("movedim.intlist",
7717TORCH_FN(wrapper_CompositeImplicitAutograd_intlist_movedim));
7718m.impl("movedim.int",
7719TORCH_FN(wrapper_CompositeImplicitAutograd_int_movedim));
7720m.impl("moveaxis.intlist",
7721TORCH_FN(wrapper_CompositeImplicitAutograd_intlist_moveaxis));
7722m.impl("moveaxis.int",
7723TORCH_FN(wrapper_CompositeImplicitAutograd_int_moveaxis));
7724m.impl("numpy_T",
7725TORCH_FN(wrapper_CompositeImplicitAutograd__numpy_T));
7726m.impl("matrix_H",
7727TORCH_FN(wrapper_CompositeImplicitAutograd__matrix_H));
7728m.impl("mT",
7729TORCH_FN(wrapper_CompositeImplicitAutograd__mT));
7730m.impl("mH",
7731TORCH_FN(wrapper_CompositeImplicitAutograd__mH));
7732m.impl("adjoint",
7733TORCH_FN(wrapper_CompositeImplicitAutograd__adjoint));
7734m.impl("native_channel_shuffle",
7735TORCH_FN(wrapper_CompositeImplicitAutograd__native_channel_shuffle));
7736m.impl("pin_memory",
7737TORCH_FN(wrapper_CompositeImplicitAutograd__pin_memory));
7738m.impl("pinverse",
7739TORCH_FN(wrapper_CompositeImplicitAutograd__pinverse));
7740m.impl("poisson_nll_loss",
7741TORCH_FN(wrapper_CompositeImplicitAutograd__poisson_nll_loss));
7742m.impl("rand.generator_out",
7743TORCH_FN(wrapper_CompositeImplicitAutograd_generator_out_rand_out));
7744m.impl("randn.out",
7745TORCH_FN(wrapper_CompositeImplicitAutograd_out_randn_out));
7746m.impl("randn.generator_out",
7747TORCH_FN(wrapper_CompositeImplicitAutograd_generator_out_randn_out));
7748m.impl("ravel",
7749TORCH_FN(wrapper_CompositeImplicitAutograd__ravel));
7750m.impl("negative",
7751TORCH_FN(wrapper_CompositeImplicitAutograd__negative));
7752m.impl("negative.out",
7753TORCH_FN(wrapper_CompositeImplicitAutograd_out_negative_out));
7754m.impl("negative_",
7755TORCH_FN(wrapper_CompositeImplicitAutograd__negative_));
7756m.impl("repeat_interleave.self_Tensor",
7757TORCH_FN(wrapper_CompositeImplicitAutograd_self_Tensor_repeat_interleave));
7758m.impl("repeat_interleave.self_int",
7759TORCH_FN(wrapper_CompositeImplicitAutograd_self_int_repeat_interleave));
7760m.impl("reshape",
7761TORCH_FN(wrapper_CompositeImplicitAutograd__reshape));
7762m.impl("reshape_as",
7763TORCH_FN(wrapper_CompositeImplicitAutograd__reshape_as));
7764m.impl("rrelu",
7765TORCH_FN(wrapper_CompositeImplicitAutograd__rrelu));
7766m.impl("rrelu_",
7767TORCH_FN(wrapper_CompositeImplicitAutograd__rrelu_));
7768m.impl("relu6",
7769TORCH_FN(wrapper_CompositeImplicitAutograd__relu6));
7770m.impl("relu6_",
7771TORCH_FN(wrapper_CompositeImplicitAutograd__relu6_));
7772m.impl("prelu",
7773TORCH_FN(wrapper_CompositeImplicitAutograd__prelu));
7774m.impl("infinitely_differentiable_gelu_backward",
7775TORCH_FN(wrapper_CompositeImplicitAutograd__infinitely_differentiable_gelu_backward));
7776m.impl("select.Dimname",
7777TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname_select));
7778m.impl("selu",
7779TORCH_FN(wrapper_CompositeImplicitAutograd__selu));
7780m.impl("selu_",
7781TORCH_FN(wrapper_CompositeImplicitAutograd__selu_));
7782m.impl("silu_backward",
7783TORCH_FN(wrapper_CompositeImplicitAutograd__silu_backward));
7784m.impl("mish_backward",
7785TORCH_FN(wrapper_CompositeImplicitAutograd__mish_backward));
7786m.impl("size.int",
7787TORCH_FN(wrapper_CompositeImplicitAutograd_int_size));
7788m.impl("size.Dimname",
7789TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname_size));
7790m.impl("smm",
7791TORCH_FN(wrapper_CompositeImplicitAutograd__smm));
7792m.impl("softmax.int",
7793TORCH_FN(wrapper_CompositeImplicitAutograd_int_softmax));
7794m.impl("softmax.Dimname",
7795TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname_softmax));
7796m.impl("split.sizes",
7797TORCH_FN(wrapper_CompositeImplicitAutograd_sizes_split));
7798m.impl("hsplit.int",
7799TORCH_FN(wrapper_CompositeImplicitAutograd_int_hsplit));
7800m.impl("hsplit.array",
7801TORCH_FN(wrapper_CompositeImplicitAutograd_array_hsplit));
7802m.impl("vsplit.int",
7803TORCH_FN(wrapper_CompositeImplicitAutograd_int_vsplit));
7804m.impl("vsplit.array",
7805TORCH_FN(wrapper_CompositeImplicitAutograd_array_vsplit));
7806m.impl("dsplit.int",
7807TORCH_FN(wrapper_CompositeImplicitAutograd_int_dsplit));
7808m.impl("dsplit.array",
7809TORCH_FN(wrapper_CompositeImplicitAutograd_array_dsplit));
7810m.impl("squeeze.dimname",
7811TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_squeeze));
7812m.impl("squeeze_.dimname",
7813TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_squeeze_));
7814m.impl("sspaddmm",
7815TORCH_FN(wrapper_CompositeImplicitAutograd__sspaddmm));
7816m.impl("hstack",
7817TORCH_FN(wrapper_CompositeImplicitAutograd__hstack));
7818m.impl("hstack.out",
7819TORCH_FN(wrapper_CompositeImplicitAutograd_out_hstack_out));
7820m.impl("vstack",
7821TORCH_FN(wrapper_CompositeImplicitAutograd__vstack));
7822m.impl("vstack.out",
7823TORCH_FN(wrapper_CompositeImplicitAutograd_out_vstack_out));
7824m.impl("dstack",
7825TORCH_FN(wrapper_CompositeImplicitAutograd__dstack));
7826m.impl("dstack.out",
7827TORCH_FN(wrapper_CompositeImplicitAutograd_out_dstack_out));
7828m.impl("stft",
7829TORCH_FN(wrapper_CompositeImplicitAutograd__stft));
7830m.impl("stft.center",
7831TORCH_FN(wrapper_CompositeImplicitAutograd_center_stft));
7832m.impl("istft",
7833TORCH_FN(wrapper_CompositeImplicitAutograd__istft));
7834m.impl("stride.int",
7835TORCH_FN(wrapper_CompositeImplicitAutograd_int_stride));
7836m.impl("stride.Dimname",
7837TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname_stride));
7838m.impl("sum.dim_DimnameList",
7839TORCH_FN(wrapper_CompositeImplicitAutograd_dim_DimnameList_sum));
7840m.impl("sum.DimnameList_out",
7841TORCH_FN(wrapper_CompositeImplicitAutograd_DimnameList_out_sum_out));
7842m.impl("sum_to_size",
7843TORCH_FN(wrapper_CompositeImplicitAutograd__sum_to_size));
7844m.impl("square",
7845TORCH_FN(wrapper_CompositeImplicitAutograd__square));
7846m.impl("square.out",
7847TORCH_FN(wrapper_CompositeImplicitAutograd_out_square_out));
7848m.impl("square_",
7849TORCH_FN(wrapper_CompositeImplicitAutograd__square_));
7850m.impl("std",
7851TORCH_FN(wrapper_CompositeImplicitAutograd__std));
7852m.impl("std.dim",
7853TORCH_FN(wrapper_CompositeImplicitAutograd_dim_std));
7854m.impl("std.out",
7855TORCH_FN(wrapper_CompositeImplicitAutograd_out_std_out));
7856m.impl("std_mean",
7857TORCH_FN(wrapper_CompositeImplicitAutograd__std_mean));
7858m.impl("std_mean.dim",
7859TORCH_FN(wrapper_CompositeImplicitAutograd_dim_std_mean));
7860m.impl("std_mean.names_dim",
7861TORCH_FN(wrapper_CompositeImplicitAutograd_names_dim_std_mean));
7862m.impl("std_mean.correction_names",
7863TORCH_FN(wrapper_CompositeImplicitAutograd_correction_names_std_mean));
7864m.impl("std.names_dim",
7865TORCH_FN(wrapper_CompositeImplicitAutograd_names_dim_std));
7866m.impl("std.names_out",
7867TORCH_FN(wrapper_CompositeImplicitAutograd_names_out_std_out));
7868m.impl("std.correction_names",
7869TORCH_FN(wrapper_CompositeImplicitAutograd_correction_names_std));
7870m.impl("std.correction_names_out",
7871TORCH_FN(wrapper_CompositeImplicitAutograd_correction_names_out_std_out));
7872m.impl("prod.dim_Dimname",
7873TORCH_FN(wrapper_CompositeImplicitAutograd_dim_Dimname_prod));
7874m.impl("prod.Dimname_out",
7875TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname_out_prod_out));
7876m.impl("tensordot",
7877TORCH_FN(wrapper_CompositeImplicitAutograd__tensordot));
7878m.impl("tile",
7879TORCH_FN(wrapper_CompositeImplicitAutograd__tile));
7880m.impl("transpose.Dimname",
7881TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname_transpose));
7882m.impl("one_hot",
7883TORCH_FN(wrapper_CompositeImplicitAutograd__one_hot));
7884m.impl("fliplr",
7885TORCH_FN(wrapper_CompositeImplicitAutograd__fliplr));
7886m.impl("flipud",
7887TORCH_FN(wrapper_CompositeImplicitAutograd__flipud));
7888m.impl("trapezoid.x",
7889TORCH_FN(wrapper_CompositeImplicitAutograd_x_trapezoid));
7890m.impl("trapezoid.dx",
7891TORCH_FN(wrapper_CompositeImplicitAutograd_dx_trapezoid));
7892m.impl("trapz.x",
7893TORCH_FN(wrapper_CompositeImplicitAutograd_x_trapz));
7894m.impl("trapz.dx",
7895TORCH_FN(wrapper_CompositeImplicitAutograd_dx_trapz));
7896m.impl("triplet_margin_loss",
7897TORCH_FN(wrapper_CompositeImplicitAutograd__triplet_margin_loss));
7898m.impl("fix",
7899TORCH_FN(wrapper_CompositeImplicitAutograd__fix));
7900m.impl("fix.out",
7901TORCH_FN(wrapper_CompositeImplicitAutograd_out_fix_out));
7902m.impl("fix_",
7903TORCH_FN(wrapper_CompositeImplicitAutograd__fix_));
7904m.impl("type_as",
7905TORCH_FN(wrapper_CompositeImplicitAutograd__type_as));
7906m.impl("_has_compatible_shallow_copy_type",
7907TORCH_FN(wrapper_CompositeImplicitAutograd___has_compatible_shallow_copy_type));
7908m.impl("vander",
7909TORCH_FN(wrapper_CompositeImplicitAutograd__vander));
7910m.impl("var",
7911TORCH_FN(wrapper_CompositeImplicitAutograd__var));
7912m.impl("var.dim",
7913TORCH_FN(wrapper_CompositeImplicitAutograd_dim_var));
7914m.impl("var.out",
7915TORCH_FN(wrapper_CompositeImplicitAutograd_out_var_out));
7916m.impl("var.names_dim",
7917TORCH_FN(wrapper_CompositeImplicitAutograd_names_dim_var));
7918m.impl("var.names_out",
7919TORCH_FN(wrapper_CompositeImplicitAutograd_names_out_var_out));
7920m.impl("var.correction_names",
7921TORCH_FN(wrapper_CompositeImplicitAutograd_correction_names_var));
7922m.impl("var.correction_names_out",
7923TORCH_FN(wrapper_CompositeImplicitAutograd_correction_names_out_var_out));
7924m.impl("var_mean",
7925TORCH_FN(wrapper_CompositeImplicitAutograd__var_mean));
7926m.impl("var_mean.dim",
7927TORCH_FN(wrapper_CompositeImplicitAutograd_dim_var_mean));
7928m.impl("var_mean.names_dim",
7929TORCH_FN(wrapper_CompositeImplicitAutograd_names_dim_var_mean));
7930m.impl("var_mean.correction_names",
7931TORCH_FN(wrapper_CompositeImplicitAutograd_correction_names_var_mean));
7932m.impl("view_as",
7933TORCH_FN(wrapper_CompositeImplicitAutograd__view_as));
7934m.impl("where.ScalarSelf",
7935TORCH_FN(wrapper_CompositeImplicitAutograd_ScalarSelf_where));
7936m.impl("where.ScalarOther",
7937TORCH_FN(wrapper_CompositeImplicitAutograd_ScalarOther_where));
7938m.impl("where.Scalar",
7939TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_where));
7940m.impl("where",
7941TORCH_FN(wrapper_CompositeImplicitAutograd__where));
7942m.impl("norm_except_dim",
7943TORCH_FN(wrapper_CompositeImplicitAutograd__norm_except_dim));
7944m.impl("_weight_norm",
7945TORCH_FN(wrapper_CompositeImplicitAutograd___weight_norm));
7946m.impl("_weight_norm_differentiable_backward",
7947TORCH_FN(wrapper_CompositeImplicitAutograd___weight_norm_differentiable_backward));
7948m.impl("_sparse_sum",
7949TORCH_FN(wrapper_CompositeImplicitAutograd___sparse_sum));
7950m.impl("_sparse_sum.dtype",
7951TORCH_FN(wrapper_CompositeImplicitAutograd_dtype__sparse_sum));
7952m.impl("_sparse_sum.dim_dtype",
7953TORCH_FN(wrapper_CompositeImplicitAutograd_dim_dtype__sparse_sum));
7954m.impl("_sparse_softmax.int",
7955TORCH_FN(wrapper_CompositeImplicitAutograd_int__sparse_softmax));
7956m.impl("_sparse_softmax.Dimname",
7957TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname__sparse_softmax));
7958m.impl("_sparse_log_softmax.int",
7959TORCH_FN(wrapper_CompositeImplicitAutograd_int__sparse_log_softmax));
7960m.impl("_sparse_log_softmax.Dimname",
7961TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname__sparse_log_softmax));
7962m.impl("norm.names_ScalarOpt_dim_dtype",
7963TORCH_FN(wrapper_CompositeImplicitAutograd_names_ScalarOpt_dim_dtype_norm));
7964m.impl("norm.names_dtype_out",
7965TORCH_FN(wrapper_CompositeImplicitAutograd_names_dtype_out_norm_out));
7966m.impl("norm.names_ScalarOpt_dim",
7967TORCH_FN(wrapper_CompositeImplicitAutograd_names_ScalarOpt_dim_norm));
7968m.impl("norm.names_out",
7969TORCH_FN(wrapper_CompositeImplicitAutograd_names_out_norm_out));
7970m.impl("frobenius_norm.dim",
7971TORCH_FN(wrapper_CompositeImplicitAutograd_dim_frobenius_norm));
7972m.impl("frobenius_norm.out",
7973TORCH_FN(wrapper_CompositeImplicitAutograd_out_frobenius_norm_out));
7974m.impl("nuclear_norm",
7975TORCH_FN(wrapper_CompositeImplicitAutograd__nuclear_norm));
7976m.impl("nuclear_norm.out",
7977TORCH_FN(wrapper_CompositeImplicitAutograd_out_nuclear_norm_out));
7978m.impl("nuclear_norm.dim",
7979TORCH_FN(wrapper_CompositeImplicitAutograd_dim_nuclear_norm));
7980m.impl("nuclear_norm.dim_out",
7981TORCH_FN(wrapper_CompositeImplicitAutograd_dim_out_nuclear_norm_out));
7982m.impl("positive",
7983TORCH_FN(wrapper_CompositeImplicitAutograd__positive));
7984m.impl("subtract.Tensor",
7985TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_subtract));
7986m.impl("subtract.out",
7987TORCH_FN(wrapper_CompositeImplicitAutograd_out_subtract_out));
7988m.impl("subtract_.Tensor",
7989TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_subtract_));
7990m.impl("subtract.Scalar",
7991TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_subtract));
7992m.impl("subtract_.Scalar",
7993TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_subtract_));
7994m.impl("sparse_compressed_tensor.comp_plain_value_size",
7995TORCH_FN(wrapper_CompositeImplicitAutograd_comp_plain_value_size_sparse_compressed_tensor));
7996m.impl("sparse_csr_tensor.crow_col_value_size",
7997TORCH_FN(wrapper_CompositeImplicitAutograd_crow_col_value_size_sparse_csr_tensor));
7998m.impl("sparse_csc_tensor.ccol_row_value_size",
7999TORCH_FN(wrapper_CompositeImplicitAutograd_ccol_row_value_size_sparse_csc_tensor));
8000m.impl("sparse_bsr_tensor.crow_col_value_size",
8001TORCH_FN(wrapper_CompositeImplicitAutograd_crow_col_value_size_sparse_bsr_tensor));
8002m.impl("sparse_bsc_tensor.ccol_row_value_size",
8003TORCH_FN(wrapper_CompositeImplicitAutograd_ccol_row_value_size_sparse_bsc_tensor));
8004m.impl("sparse_compressed_tensor.comp_plain_value",
8005TORCH_FN(wrapper_CompositeImplicitAutograd_comp_plain_value_sparse_compressed_tensor));
8006m.impl("sparse_csr_tensor.crow_col_value",
8007TORCH_FN(wrapper_CompositeImplicitAutograd_crow_col_value_sparse_csr_tensor));
8008m.impl("sparse_csc_tensor.ccol_row_value",
8009TORCH_FN(wrapper_CompositeImplicitAutograd_ccol_row_value_sparse_csc_tensor));
8010m.impl("sparse_bsr_tensor.crow_col_value",
8011TORCH_FN(wrapper_CompositeImplicitAutograd_crow_col_value_sparse_bsr_tensor));
8012m.impl("sparse_bsc_tensor.ccol_row_value",
8013TORCH_FN(wrapper_CompositeImplicitAutograd_ccol_row_value_sparse_bsc_tensor));
8014m.impl("_sparse_compressed_tensor_unsafe",
8015TORCH_FN(wrapper_CompositeImplicitAutograd___sparse_compressed_tensor_unsafe));
8016m.impl("_sparse_csr_tensor_unsafe",
8017TORCH_FN(wrapper_CompositeImplicitAutograd___sparse_csr_tensor_unsafe));
8018m.impl("_sparse_csc_tensor_unsafe",
8019TORCH_FN(wrapper_CompositeImplicitAutograd___sparse_csc_tensor_unsafe));
8020m.impl("_sparse_bsr_tensor_unsafe",
8021TORCH_FN(wrapper_CompositeImplicitAutograd___sparse_bsr_tensor_unsafe));
8022m.impl("_sparse_bsc_tensor_unsafe",
8023TORCH_FN(wrapper_CompositeImplicitAutograd___sparse_bsc_tensor_unsafe));
8024m.impl("sparse_coo_tensor.indices",
8025TORCH_FN(wrapper_CompositeImplicitAutograd_indices_sparse_coo_tensor));
8026m.impl("sparse_coo_tensor.indices_size",
8027TORCH_FN(wrapper_CompositeImplicitAutograd_indices_size_sparse_coo_tensor));
8028m.impl("_sparse_coo_tensor_unsafe",
8029TORCH_FN(wrapper_CompositeImplicitAutograd___sparse_coo_tensor_unsafe));
8030m.impl("_validate_sparse_coo_tensor_args",
8031TORCH_FN(wrapper_CompositeImplicitAutograd___validate_sparse_coo_tensor_args));
8032m.impl("_validate_sparse_compressed_tensor_args",
8033TORCH_FN(wrapper_CompositeImplicitAutograd___validate_sparse_compressed_tensor_args));
8034m.impl("_validate_sparse_csr_tensor_args",
8035TORCH_FN(wrapper_CompositeImplicitAutograd___validate_sparse_csr_tensor_args));
8036m.impl("_validate_sparse_csc_tensor_args",
8037TORCH_FN(wrapper_CompositeImplicitAutograd___validate_sparse_csc_tensor_args));
8038m.impl("_validate_sparse_bsr_tensor_args",
8039TORCH_FN(wrapper_CompositeImplicitAutograd___validate_sparse_bsr_tensor_args));
8040m.impl("_validate_sparse_bsc_tensor_args",
8041TORCH_FN(wrapper_CompositeImplicitAutograd___validate_sparse_bsc_tensor_args));
8042m.impl("_to_cpu",
8043TORCH_FN(wrapper_CompositeImplicitAutograd___to_cpu));
8044m.impl("to_dense",
8045TORCH_FN(wrapper_CompositeImplicitAutograd__to_dense));
8046m.impl("to_dense_backward",
8047TORCH_FN(wrapper_CompositeImplicitAutograd__to_dense_backward));
8048m.impl("coalesce",
8049TORCH_FN(wrapper_CompositeImplicitAutograd__coalesce));
8050m.impl("unbind.Dimname",
8051TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname_unbind));
8052m.impl("to_mkldnn_backward",
8053TORCH_FN(wrapper_CompositeImplicitAutograd__to_mkldnn_backward));
8054m.impl("fake_quantize_per_tensor_affine",
8055TORCH_FN(wrapper_CompositeImplicitAutograd__fake_quantize_per_tensor_affine));
8056m.impl("fake_quantize_per_tensor_affine.tensor_qparams",
8057TORCH_FN(wrapper_CompositeImplicitAutograd_tensor_qparams_fake_quantize_per_tensor_affine));
8058m.impl("fake_quantize_per_tensor_affine_cachemask_backward",
8059TORCH_FN(wrapper_CompositeImplicitAutograd__fake_quantize_per_tensor_affine_cachemask_backward));
8060m.impl("fake_quantize_per_channel_affine",
8061TORCH_FN(wrapper_CompositeImplicitAutograd__fake_quantize_per_channel_affine));
8062m.impl("fake_quantize_per_channel_affine_cachemask_backward",
8063TORCH_FN(wrapper_CompositeImplicitAutograd__fake_quantize_per_channel_affine_cachemask_backward));
8064m.impl("fused_moving_avg_obs_fake_quant",
8065TORCH_FN(wrapper_CompositeImplicitAutograd__fused_moving_avg_obs_fake_quant));
8066m.impl("_choose_qparams_per_tensor",
8067TORCH_FN(wrapper_CompositeImplicitAutograd___choose_qparams_per_tensor));
8068m.impl("_saturate_weight_to_fp16",
8069TORCH_FN(wrapper_CompositeImplicitAutograd___saturate_weight_to_fp16));
8070m.impl("choose_qparams_optimized",
8071TORCH_FN(wrapper_CompositeImplicitAutograd__choose_qparams_optimized));
8072m.impl("_autocast_to_reduced_precision",
8073TORCH_FN(wrapper_CompositeImplicitAutograd___autocast_to_reduced_precision));
8074m.impl("_autocast_to_full_precision",
8075TORCH_FN(wrapper_CompositeImplicitAutograd___autocast_to_full_precision));
8076m.impl("to.dtype_layout",
8077TORCH_FN(wrapper_CompositeImplicitAutograd_dtype_layout_to));
8078m.impl("to.device",
8079TORCH_FN(wrapper_CompositeImplicitAutograd_device_to));
8080m.impl("to.dtype",
8081TORCH_FN(wrapper_CompositeImplicitAutograd_dtype_to));
8082m.impl("to.other",
8083TORCH_FN(wrapper_CompositeImplicitAutograd_other_to));
8084m.impl("meshgrid",
8085TORCH_FN(wrapper_CompositeImplicitAutograd__meshgrid));
8086m.impl("meshgrid.indexing",
8087TORCH_FN(wrapper_CompositeImplicitAutograd_indexing_meshgrid));
8088m.impl("cartesian_prod",
8089TORCH_FN(wrapper_CompositeImplicitAutograd__cartesian_prod));
8090m.impl("combinations",
8091TORCH_FN(wrapper_CompositeImplicitAutograd__combinations));
8092m.impl("item",
8093TORCH_FN(wrapper_CompositeImplicitAutograd__item));
8094m.impl("result_type.Tensor",
8095TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_result_type));
8096m.impl("result_type.Scalar",
8097TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_result_type));
8098m.impl("result_type.Scalar_Tensor",
8099TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_Tensor_result_type));
8100m.impl("result_type.Scalar_Scalar",
8101TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_Scalar_result_type));
8102m.impl("can_cast",
8103TORCH_FN(wrapper_CompositeImplicitAutograd__can_cast));
8104m.impl("promote_types",
8105TORCH_FN(wrapper_CompositeImplicitAutograd__promote_types));
8106m.impl("_thnn_fused_lstm_cell_backward",
8107TORCH_FN(wrapper_CompositeImplicitAutograd___thnn_fused_lstm_cell_backward));
8108m.impl("_thnn_differentiable_lstm_cell_backward",
8109TORCH_FN(wrapper_CompositeImplicitAutograd___thnn_differentiable_lstm_cell_backward));
8110m.impl("_thnn_differentiable_gru_cell_backward",
8111TORCH_FN(wrapper_CompositeImplicitAutograd___thnn_differentiable_gru_cell_backward));
8112m.impl("lstm.input",
8113TORCH_FN(wrapper_CompositeImplicitAutograd_input_lstm));
8114m.impl("lstm.data",
8115TORCH_FN(wrapper_CompositeImplicitAutograd_data_lstm));
8116m.impl("gru.input",
8117TORCH_FN(wrapper_CompositeImplicitAutograd_input_gru));
8118m.impl("gru.data",
8119TORCH_FN(wrapper_CompositeImplicitAutograd_data_gru));
8120m.impl("rnn_tanh.input",
8121TORCH_FN(wrapper_CompositeImplicitAutograd_input_rnn_tanh));
8122m.impl("rnn_tanh.data",
8123TORCH_FN(wrapper_CompositeImplicitAutograd_data_rnn_tanh));
8124m.impl("rnn_relu.input",
8125TORCH_FN(wrapper_CompositeImplicitAutograd_input_rnn_relu));
8126m.impl("rnn_relu.data",
8127TORCH_FN(wrapper_CompositeImplicitAutograd_data_rnn_relu));
8128m.impl("lstm_cell",
8129TORCH_FN(wrapper_CompositeImplicitAutograd__lstm_cell));
8130m.impl("gru_cell",
8131TORCH_FN(wrapper_CompositeImplicitAutograd__gru_cell));
8132m.impl("rnn_tanh_cell",
8133TORCH_FN(wrapper_CompositeImplicitAutograd__rnn_tanh_cell));
8134m.impl("rnn_relu_cell",
8135TORCH_FN(wrapper_CompositeImplicitAutograd__rnn_relu_cell));
8136m.impl("quantized_lstm_cell",
8137TORCH_FN(wrapper_CompositeImplicitAutograd__quantized_lstm_cell));
8138m.impl("quantized_gru_cell",
8139TORCH_FN(wrapper_CompositeImplicitAutograd__quantized_gru_cell));
8140m.impl("quantized_rnn_relu_cell",
8141TORCH_FN(wrapper_CompositeImplicitAutograd__quantized_rnn_relu_cell));
8142m.impl("quantized_rnn_tanh_cell",
8143TORCH_FN(wrapper_CompositeImplicitAutograd__quantized_rnn_tanh_cell));
8144m.impl("_pack_padded_sequence_backward",
8145TORCH_FN(wrapper_CompositeImplicitAutograd___pack_padded_sequence_backward));
8146m.impl("_pad_packed_sequence",
8147TORCH_FN(wrapper_CompositeImplicitAutograd___pad_packed_sequence));
8148m.impl("set_.source_Tensor_storage_offset",
8149TORCH_FN(wrapper_CompositeImplicitAutograd_source_Tensor_storage_offset_set_));
8150m.impl("index_add.dimname",
8151TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_index_add));
8152m.impl("index_fill_.Dimname_Scalar",
8153TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname_Scalar_index_fill_));
8154m.impl("index_fill.Dimname_Scalar",
8155TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname_Scalar_index_fill));
8156m.impl("index_fill_.Dimname_Tensor",
8157TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname_Tensor_index_fill_));
8158m.impl("index_fill.Dimname_Tensor",
8159TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname_Tensor_index_fill));
8160m.impl("scatter.dimname_src",
8161TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_src_scatter));
8162m.impl("scatter.dimname_value",
8163TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_value_scatter));
8164m.impl("scatter_add.dimname",
8165TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_scatter_add));
8166m.impl("bitwise_and_.Scalar",
8167TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_bitwise_and_));
8168m.impl("__and__.Scalar",
8169TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar___and__));
8170m.impl("__iand__.Scalar",
8171TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar___iand__));
8172m.impl("__and__.Tensor",
8173TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor___and__));
8174m.impl("__iand__.Tensor",
8175TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor___iand__));
8176m.impl("bitwise_or.Scalar",
8177TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_bitwise_or));
8178m.impl("bitwise_or_.Scalar",
8179TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_bitwise_or_));
8180m.impl("__or__.Scalar",
8181TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar___or__));
8182m.impl("__ior__.Scalar",
8183TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar___ior__));
8184m.impl("__or__.Tensor",
8185TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor___or__));
8186m.impl("__ior__.Tensor",
8187TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor___ior__));
8188m.impl("bitwise_xor.Scalar",
8189TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_bitwise_xor));
8190m.impl("bitwise_xor_.Scalar",
8191TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_bitwise_xor_));
8192m.impl("__xor__.Scalar",
8193TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar___xor__));
8194m.impl("__ixor__.Scalar",
8195TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar___ixor__));
8196m.impl("__xor__.Tensor",
8197TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor___xor__));
8198m.impl("__ixor__.Tensor",
8199TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor___ixor__));
8200m.impl("diag",
8201TORCH_FN(wrapper_CompositeImplicitAutograd__diag));
8202m.impl("diag.out",
8203TORCH_FN(wrapper_CompositeImplicitAutograd_out_diag_out));
8204m.impl("cross",
8205TORCH_FN(wrapper_CompositeImplicitAutograd__cross));
8206m.impl("cross.out",
8207TORCH_FN(wrapper_CompositeImplicitAutograd_out_cross_out));
8208m.impl("trace_backward",
8209TORCH_FN(wrapper_CompositeImplicitAutograd__trace_backward));
8210m.impl("not_equal.Scalar",
8211TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_not_equal));
8212m.impl("not_equal.Scalar_out",
8213TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_out_not_equal_out));
8214m.impl("not_equal_.Scalar",
8215TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_not_equal_));
8216m.impl("not_equal.Tensor",
8217TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_not_equal));
8218m.impl("not_equal.Tensor_out",
8219TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_out_not_equal_out));
8220m.impl("not_equal_.Tensor",
8221TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_not_equal_));
8222m.impl("greater_equal.Scalar",
8223TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_greater_equal));
8224m.impl("greater_equal.Scalar_out",
8225TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_out_greater_equal_out));
8226m.impl("greater_equal_.Scalar",
8227TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_greater_equal_));
8228m.impl("greater_equal.Tensor",
8229TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_greater_equal));
8230m.impl("greater_equal.Tensor_out",
8231TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_out_greater_equal_out));
8232m.impl("greater_equal_.Tensor",
8233TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_greater_equal_));
8234m.impl("less_equal.Scalar",
8235TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_less_equal));
8236m.impl("less_equal.Scalar_out",
8237TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_out_less_equal_out));
8238m.impl("less_equal_.Scalar",
8239TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_less_equal_));
8240m.impl("less_equal.Tensor",
8241TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_less_equal));
8242m.impl("less_equal.Tensor_out",
8243TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_out_less_equal_out));
8244m.impl("less_equal_.Tensor",
8245TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_less_equal_));
8246m.impl("greater.Scalar",
8247TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_greater));
8248m.impl("greater.Scalar_out",
8249TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_out_greater_out));
8250m.impl("greater_.Scalar",
8251TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_greater_));
8252m.impl("greater.Tensor",
8253TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_greater));
8254m.impl("greater.Tensor_out",
8255TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_out_greater_out));
8256m.impl("greater_.Tensor",
8257TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_greater_));
8258m.impl("less.Scalar",
8259TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_less));
8260m.impl("less.Scalar_out",
8261TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_out_less_out));
8262m.impl("less_.Scalar",
8263TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_less_));
8264m.impl("less.Tensor",
8265TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_less));
8266m.impl("less.Tensor_out",
8267TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_out_less_out));
8268m.impl("less_.Tensor",
8269TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_less_));
8270m.impl("take_along_dim",
8271TORCH_FN(wrapper_CompositeImplicitAutograd__take_along_dim));
8272m.impl("take_along_dim.out",
8273TORCH_FN(wrapper_CompositeImplicitAutograd_out_take_along_dim_out));
8274m.impl("index_select.dimname",
8275TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_index_select));
8276m.impl("index_select.dimname_out",
8277TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_out_index_select_out));
8278m.impl("index_select_backward",
8279TORCH_FN(wrapper_CompositeImplicitAutograd__index_select_backward));
8280m.impl("masked_select_backward",
8281TORCH_FN(wrapper_CompositeImplicitAutograd__masked_select_backward));
8282m.impl("nonzero_numpy",
8283TORCH_FN(wrapper_CompositeImplicitAutograd__nonzero_numpy));
8284m.impl("argwhere",
8285TORCH_FN(wrapper_CompositeImplicitAutograd__argwhere));
8286m.impl("gather_backward",
8287TORCH_FN(wrapper_CompositeImplicitAutograd__gather_backward));
8288m.impl("gather.dimname",
8289TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_gather));
8290m.impl("gather.dimname_out",
8291TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_out_gather_out));
8292m.impl("_gather_sparse_backward",
8293TORCH_FN(wrapper_CompositeImplicitAutograd___gather_sparse_backward));
8294m.impl("cross_entropy_loss",
8295TORCH_FN(wrapper_CompositeImplicitAutograd__cross_entropy_loss));
8296m.impl("linalg_vander",
8297TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_vander));
8298m.impl("svd",
8299TORCH_FN(wrapper_CompositeImplicitAutograd__svd));
8300m.impl("svd.U",
8301TORCH_FN(wrapper_CompositeImplicitAutograd_U_svd_out));
8302m.impl("swapaxes",
8303TORCH_FN(wrapper_CompositeImplicitAutograd__swapaxes));
8304m.impl("swapaxes_",
8305TORCH_FN(wrapper_CompositeImplicitAutograd__swapaxes_));
8306m.impl("swapdims",
8307TORCH_FN(wrapper_CompositeImplicitAutograd__swapdims));
8308m.impl("swapdims_",
8309TORCH_FN(wrapper_CompositeImplicitAutograd__swapdims_));
8310m.impl("qr",
8311TORCH_FN(wrapper_CompositeImplicitAutograd__qr));
8312m.impl("qr.Q",
8313TORCH_FN(wrapper_CompositeImplicitAutograd_Q_qr_out));
8314m.impl("orgqr",
8315TORCH_FN(wrapper_CompositeImplicitAutograd__orgqr));
8316m.impl("orgqr.out",
8317TORCH_FN(wrapper_CompositeImplicitAutograd_out_orgqr_out));
8318m.impl("_lu_with_info",
8319TORCH_FN(wrapper_CompositeImplicitAutograd___lu_with_info));
8320m.impl("lu_solve",
8321TORCH_FN(wrapper_CompositeImplicitAutograd__lu_solve));
8322m.impl("lu_solve.out",
8323TORCH_FN(wrapper_CompositeImplicitAutograd_out_lu_solve_out));
8324m.impl("arctan2",
8325TORCH_FN(wrapper_CompositeImplicitAutograd__arctan2));
8326m.impl("arctan2.out",
8327TORCH_FN(wrapper_CompositeImplicitAutograd_out_arctan2_out));
8328m.impl("arctan2_",
8329TORCH_FN(wrapper_CompositeImplicitAutograd__arctan2_));
8330m.impl("histogramdd",
8331TORCH_FN(wrapper_CompositeImplicitAutograd__histogramdd));
8332m.impl("histogramdd.int_bins",
8333TORCH_FN(wrapper_CompositeImplicitAutograd_int_bins_histogramdd));
8334m.impl("histogramdd.TensorList_bins",
8335TORCH_FN(wrapper_CompositeImplicitAutograd_TensorList_bins_histogramdd));
8336m.impl("max.other",
8337TORCH_FN(wrapper_CompositeImplicitAutograd_other_max));
8338m.impl("max.out",
8339TORCH_FN(wrapper_CompositeImplicitAutograd_out_max_out));
8340m.impl("min.other",
8341TORCH_FN(wrapper_CompositeImplicitAutograd_other_min));
8342m.impl("min.out",
8343TORCH_FN(wrapper_CompositeImplicitAutograd_out_min_out));
8344m.impl("quantile",
8345TORCH_FN(wrapper_CompositeImplicitAutograd__quantile));
8346m.impl("quantile.out",
8347TORCH_FN(wrapper_CompositeImplicitAutograd_out_quantile_out));
8348m.impl("quantile.scalar",
8349TORCH_FN(wrapper_CompositeImplicitAutograd_scalar_quantile));
8350m.impl("quantile.scalar_out",
8351TORCH_FN(wrapper_CompositeImplicitAutograd_scalar_out_quantile_out));
8352m.impl("nanquantile",
8353TORCH_FN(wrapper_CompositeImplicitAutograd__nanquantile));
8354m.impl("nanquantile.out",
8355TORCH_FN(wrapper_CompositeImplicitAutograd_out_nanquantile_out));
8356m.impl("nanquantile.scalar",
8357TORCH_FN(wrapper_CompositeImplicitAutograd_scalar_nanquantile));
8358m.impl("nanquantile.scalar_out",
8359TORCH_FN(wrapper_CompositeImplicitAutograd_scalar_out_nanquantile_out));
8360m.impl("sort.dimname",
8361TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_sort));
8362m.impl("sort.dimname_values",
8363TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_values_sort_out));
8364m.impl("sort.dimname_stable",
8365TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_stable_sort));
8366m.impl("sort.dimname_values_stable",
8367TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_values_stable_sort_out));
8368m.impl("msort",
8369TORCH_FN(wrapper_CompositeImplicitAutograd__msort));
8370m.impl("msort.out",
8371TORCH_FN(wrapper_CompositeImplicitAutograd_out_msort_out));
8372m.impl("argsort",
8373TORCH_FN(wrapper_CompositeImplicitAutograd__argsort));
8374m.impl("argsort.dimname",
8375TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_argsort));
8376m.impl("float_power.Tensor_Tensor",
8377TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_Tensor_float_power));
8378m.impl("float_power.Tensor_Tensor_out",
8379TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_Tensor_out_float_power_out));
8380m.impl("float_power_.Tensor",
8381TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_float_power_));
8382m.impl("float_power.Scalar",
8383TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_float_power));
8384m.impl("float_power.Scalar_out",
8385TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_out_float_power_out));
8386m.impl("float_power.Tensor_Scalar",
8387TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_Scalar_float_power));
8388m.impl("float_power.Tensor_Scalar_out",
8389TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_Scalar_out_float_power_out));
8390m.impl("float_power_.Scalar",
8391TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_float_power_));
8392m.impl("l1_loss",
8393TORCH_FN(wrapper_CompositeImplicitAutograd__l1_loss));
8394m.impl("multilabel_margin_loss",
8395TORCH_FN(wrapper_CompositeImplicitAutograd__multilabel_margin_loss));
8396m.impl("multilabel_margin_loss.out",
8397TORCH_FN(wrapper_CompositeImplicitAutograd_out_multilabel_margin_loss_out));
8398m.impl("nll_loss",
8399TORCH_FN(wrapper_CompositeImplicitAutograd__nll_loss));
8400m.impl("nll_loss.out",
8401TORCH_FN(wrapper_CompositeImplicitAutograd_out_nll_loss_out));
8402m.impl("nll_loss_nd",
8403TORCH_FN(wrapper_CompositeImplicitAutograd__nll_loss_nd));
8404m.impl("nll_loss2d",
8405TORCH_FN(wrapper_CompositeImplicitAutograd__nll_loss2d));
8406m.impl("nll_loss2d.out",
8407TORCH_FN(wrapper_CompositeImplicitAutograd_out_nll_loss2d_out));
8408m.impl("log_sigmoid",
8409TORCH_FN(wrapper_CompositeImplicitAutograd__log_sigmoid));
8410m.impl("log_sigmoid.out",
8411TORCH_FN(wrapper_CompositeImplicitAutograd_out_log_sigmoid_out));
8412m.impl("adaptive_avg_pool2d",
8413TORCH_FN(wrapper_CompositeImplicitAutograd__adaptive_avg_pool2d));
8414m.impl("adaptive_avg_pool3d",
8415TORCH_FN(wrapper_CompositeImplicitAutograd__adaptive_avg_pool3d));
8416m.impl("_pad_circular",
8417TORCH_FN(wrapper_CompositeImplicitAutograd___pad_circular));
8418m.impl("_pad_enum",
8419TORCH_FN(wrapper_CompositeImplicitAutograd___pad_enum));
8420m.impl("pad",
8421TORCH_FN(wrapper_CompositeImplicitAutograd__pad));
8422m.impl("upsample_linear1d.vec",
8423TORCH_FN(wrapper_CompositeImplicitAutograd_vec_upsample_linear1d));
8424m.impl("upsample_bilinear2d.vec",
8425TORCH_FN(wrapper_CompositeImplicitAutograd_vec_upsample_bilinear2d));
8426m.impl("_upsample_bilinear2d_aa.vec",
8427TORCH_FN(wrapper_CompositeImplicitAutograd_vec__upsample_bilinear2d_aa));
8428m.impl("upsample_trilinear3d.vec",
8429TORCH_FN(wrapper_CompositeImplicitAutograd_vec_upsample_trilinear3d));
8430m.impl("upsample_bicubic2d.vec",
8431TORCH_FN(wrapper_CompositeImplicitAutograd_vec_upsample_bicubic2d));
8432m.impl("_upsample_bicubic2d_aa.vec",
8433TORCH_FN(wrapper_CompositeImplicitAutograd_vec__upsample_bicubic2d_aa));
8434m.impl("upsample_nearest1d.vec",
8435TORCH_FN(wrapper_CompositeImplicitAutograd_vec_upsample_nearest1d));
8436m.impl("_upsample_nearest_exact1d.vec",
8437TORCH_FN(wrapper_CompositeImplicitAutograd_vec__upsample_nearest_exact1d));
8438m.impl("upsample_nearest2d.vec",
8439TORCH_FN(wrapper_CompositeImplicitAutograd_vec_upsample_nearest2d));
8440m.impl("_upsample_nearest_exact2d.vec",
8441TORCH_FN(wrapper_CompositeImplicitAutograd_vec__upsample_nearest_exact2d));
8442m.impl("upsample_nearest3d.vec",
8443TORCH_FN(wrapper_CompositeImplicitAutograd_vec_upsample_nearest3d));
8444m.impl("_upsample_nearest_exact3d.vec",
8445TORCH_FN(wrapper_CompositeImplicitAutograd_vec__upsample_nearest_exact3d));
8446m.impl("thnn_conv2d",
8447TORCH_FN(wrapper_CompositeImplicitAutograd__thnn_conv2d));
8448m.impl("thnn_conv2d.out",
8449TORCH_FN(wrapper_CompositeImplicitAutograd_out_thnn_conv2d_out));
8450m.impl("slow_conv3d",
8451TORCH_FN(wrapper_CompositeImplicitAutograd__slow_conv3d));
8452m.impl("slow_conv3d.out",
8453TORCH_FN(wrapper_CompositeImplicitAutograd_out_slow_conv3d_out));
8454m.impl("column_stack",
8455TORCH_FN(wrapper_CompositeImplicitAutograd__column_stack));
8456m.impl("column_stack.out",
8457TORCH_FN(wrapper_CompositeImplicitAutograd_out_column_stack_out));
8458m.impl("isfinite",
8459TORCH_FN(wrapper_CompositeImplicitAutograd__isfinite));
8460m.impl("_add_batch_dim",
8461TORCH_FN(wrapper_CompositeImplicitAutograd___add_batch_dim));
8462m.impl("_remove_batch_dim",
8463TORCH_FN(wrapper_CompositeImplicitAutograd___remove_batch_dim));
8464m.impl("special_expm1",
8465TORCH_FN(wrapper_CompositeImplicitAutograd__special_expm1));
8466m.impl("special_expm1.out",
8467TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_expm1_out));
8468m.impl("special_exp2",
8469TORCH_FN(wrapper_CompositeImplicitAutograd__special_exp2));
8470m.impl("special_exp2.out",
8471TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_exp2_out));
8472m.impl("special_psi",
8473TORCH_FN(wrapper_CompositeImplicitAutograd__special_psi));
8474m.impl("special_psi.out",
8475TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_psi_out));
8476m.impl("special_digamma",
8477TORCH_FN(wrapper_CompositeImplicitAutograd__special_digamma));
8478m.impl("special_digamma.out",
8479TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_digamma_out));
8480m.impl("special_gammaln",
8481TORCH_FN(wrapper_CompositeImplicitAutograd__special_gammaln));
8482m.impl("special_gammaln.out",
8483TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_gammaln_out));
8484m.impl("special_erf",
8485TORCH_FN(wrapper_CompositeImplicitAutograd__special_erf));
8486m.impl("special_erf.out",
8487TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_erf_out));
8488m.impl("special_erfc",
8489TORCH_FN(wrapper_CompositeImplicitAutograd__special_erfc));
8490m.impl("special_erfc.out",
8491TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_erfc_out));
8492m.impl("special_erfinv",
8493TORCH_FN(wrapper_CompositeImplicitAutograd__special_erfinv));
8494m.impl("special_erfinv.out",
8495TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_erfinv_out));
8496m.impl("special_ndtr",
8497TORCH_FN(wrapper_CompositeImplicitAutograd__special_ndtr));
8498m.impl("special_ndtr.out",
8499TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_ndtr_out));
8500m.impl("special_xlogy",
8501TORCH_FN(wrapper_CompositeImplicitAutograd__special_xlogy));
8502m.impl("special_xlogy.out",
8503TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_xlogy_out));
8504m.impl("special_xlogy.self_scalar",
8505TORCH_FN(wrapper_CompositeImplicitAutograd_self_scalar_special_xlogy));
8506m.impl("special_xlogy.self_scalar_out",
8507TORCH_FN(wrapper_CompositeImplicitAutograd_self_scalar_out_special_xlogy_out));
8508m.impl("special_xlogy.other_scalar",
8509TORCH_FN(wrapper_CompositeImplicitAutograd_other_scalar_special_xlogy));
8510m.impl("special_xlogy.other_scalar_out",
8511TORCH_FN(wrapper_CompositeImplicitAutograd_other_scalar_out_special_xlogy_out));
8512m.impl("special_i0",
8513TORCH_FN(wrapper_CompositeImplicitAutograd__special_i0));
8514m.impl("special_i0.out",
8515TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_i0_out));
8516m.impl("special_logit",
8517TORCH_FN(wrapper_CompositeImplicitAutograd__special_logit));
8518m.impl("special_logit.out",
8519TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_logit_out));
8520m.impl("special_polygamma",
8521TORCH_FN(wrapper_CompositeImplicitAutograd__special_polygamma));
8522m.impl("special_polygamma.out",
8523TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_polygamma_out));
8524m.impl("special_logsumexp",
8525TORCH_FN(wrapper_CompositeImplicitAutograd__special_logsumexp));
8526m.impl("special_logsumexp.out",
8527TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_logsumexp_out));
8528m.impl("special_expit",
8529TORCH_FN(wrapper_CompositeImplicitAutograd__special_expit));
8530m.impl("special_expit.out",
8531TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_expit_out));
8532m.impl("special_sinc",
8533TORCH_FN(wrapper_CompositeImplicitAutograd__special_sinc));
8534m.impl("special_sinc.out",
8535TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_sinc_out));
8536m.impl("special_round",
8537TORCH_FN(wrapper_CompositeImplicitAutograd__special_round));
8538m.impl("special_round.out",
8539TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_round_out));
8540m.impl("special_log1p",
8541TORCH_FN(wrapper_CompositeImplicitAutograd__special_log1p));
8542m.impl("special_log1p.out",
8543TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_log1p_out));
8544m.impl("special_log_softmax",
8545TORCH_FN(wrapper_CompositeImplicitAutograd__special_log_softmax));
8546m.impl("special_gammainc",
8547TORCH_FN(wrapper_CompositeImplicitAutograd__special_gammainc));
8548m.impl("special_gammainc.out",
8549TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_gammainc_out));
8550m.impl("special_gammaincc",
8551TORCH_FN(wrapper_CompositeImplicitAutograd__special_gammaincc));
8552m.impl("special_gammaincc.out",
8553TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_gammaincc_out));
8554m.impl("special_multigammaln",
8555TORCH_FN(wrapper_CompositeImplicitAutograd__special_multigammaln));
8556m.impl("special_multigammaln.out",
8557TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_multigammaln_out));
8558m.impl("special_softmax",
8559TORCH_FN(wrapper_CompositeImplicitAutograd__special_softmax));
8560m.impl("fft_fft",
8561TORCH_FN(wrapper_CompositeImplicitAutograd__fft_fft));
8562m.impl("fft_fft.out",
8563TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_fft_out));
8564m.impl("fft_ifft",
8565TORCH_FN(wrapper_CompositeImplicitAutograd__fft_ifft));
8566m.impl("fft_ifft.out",
8567TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_ifft_out));
8568m.impl("fft_rfft",
8569TORCH_FN(wrapper_CompositeImplicitAutograd__fft_rfft));
8570m.impl("fft_rfft.out",
8571TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_rfft_out));
8572m.impl("fft_irfft",
8573TORCH_FN(wrapper_CompositeImplicitAutograd__fft_irfft));
8574m.impl("fft_irfft.out",
8575TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_irfft_out));
8576m.impl("fft_hfft",
8577TORCH_FN(wrapper_CompositeImplicitAutograd__fft_hfft));
8578m.impl("fft_hfft.out",
8579TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_hfft_out));
8580m.impl("fft_ihfft",
8581TORCH_FN(wrapper_CompositeImplicitAutograd__fft_ihfft));
8582m.impl("fft_ihfft.out",
8583TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_ihfft_out));
8584m.impl("fft_fft2",
8585TORCH_FN(wrapper_CompositeImplicitAutograd__fft_fft2));
8586m.impl("fft_fft2.out",
8587TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_fft2_out));
8588m.impl("fft_ifft2",
8589TORCH_FN(wrapper_CompositeImplicitAutograd__fft_ifft2));
8590m.impl("fft_ifft2.out",
8591TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_ifft2_out));
8592m.impl("fft_rfft2",
8593TORCH_FN(wrapper_CompositeImplicitAutograd__fft_rfft2));
8594m.impl("fft_rfft2.out",
8595TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_rfft2_out));
8596m.impl("fft_irfft2",
8597TORCH_FN(wrapper_CompositeImplicitAutograd__fft_irfft2));
8598m.impl("fft_irfft2.out",
8599TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_irfft2_out));
8600m.impl("fft_hfft2",
8601TORCH_FN(wrapper_CompositeImplicitAutograd__fft_hfft2));
8602m.impl("fft_hfft2.out",
8603TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_hfft2_out));
8604m.impl("fft_ihfft2",
8605TORCH_FN(wrapper_CompositeImplicitAutograd__fft_ihfft2));
8606m.impl("fft_ihfft2.out",
8607TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_ihfft2_out));
8608m.impl("fft_fftn",
8609TORCH_FN(wrapper_CompositeImplicitAutograd__fft_fftn));
8610m.impl("fft_fftn.out",
8611TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_fftn_out));
8612m.impl("fft_ifftn",
8613TORCH_FN(wrapper_CompositeImplicitAutograd__fft_ifftn));
8614m.impl("fft_ifftn.out",
8615TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_ifftn_out));
8616m.impl("fft_rfftn",
8617TORCH_FN(wrapper_CompositeImplicitAutograd__fft_rfftn));
8618m.impl("fft_rfftn.out",
8619TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_rfftn_out));
8620m.impl("fft_irfftn",
8621TORCH_FN(wrapper_CompositeImplicitAutograd__fft_irfftn));
8622m.impl("fft_irfftn.out",
8623TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_irfftn_out));
8624m.impl("fft_hfftn",
8625TORCH_FN(wrapper_CompositeImplicitAutograd__fft_hfftn));
8626m.impl("fft_hfftn.out",
8627TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_hfftn_out));
8628m.impl("fft_ihfftn",
8629TORCH_FN(wrapper_CompositeImplicitAutograd__fft_ihfftn));
8630m.impl("fft_ihfftn.out",
8631TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_ihfftn_out));
8632m.impl("fft_fftshift",
8633TORCH_FN(wrapper_CompositeImplicitAutograd__fft_fftshift));
8634m.impl("fft_ifftshift",
8635TORCH_FN(wrapper_CompositeImplicitAutograd__fft_ifftshift));
8636m.impl("linalg_cholesky",
8637TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_cholesky));
8638m.impl("linalg_cholesky.out",
8639TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_cholesky_out));
8640m.impl("linalg_lu_factor",
8641TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_lu_factor));
8642m.impl("linalg_lu_factor.out",
8643TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_lu_factor_out));
8644m.impl("linalg_det",
8645TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_det));
8646m.impl("linalg_det.out",
8647TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_det_out));
8648m.impl("det",
8649TORCH_FN(wrapper_CompositeImplicitAutograd__det));
8650m.impl("linalg_ldl_factor",
8651TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_ldl_factor));
8652m.impl("linalg_ldl_factor.out",
8653TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_ldl_factor_out));
8654m.impl("linalg_matmul",
8655TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_matmul));
8656m.impl("linalg_matmul.out",
8657TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_matmul_out));
8658m.impl("linalg_vecdot",
8659TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_vecdot));
8660m.impl("linalg_vecdot.out",
8661TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_vecdot_out));
8662m.impl("linalg_slogdet",
8663TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_slogdet));
8664m.impl("linalg_slogdet.out",
8665TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_slogdet_out));
8666m.impl("slogdet",
8667TORCH_FN(wrapper_CompositeImplicitAutograd__slogdet));
8668m.impl("slogdet.out",
8669TORCH_FN(wrapper_CompositeImplicitAutograd_out_slogdet_out));
8670m.impl("logdet",
8671TORCH_FN(wrapper_CompositeImplicitAutograd__logdet));
8672m.impl("linalg_eigvals",
8673TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_eigvals));
8674m.impl("linalg_eigvals.out",
8675TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_eigvals_out));
8676m.impl("linalg_eigh",
8677TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_eigh));
8678m.impl("linalg_eigh.eigvals",
8679TORCH_FN(wrapper_CompositeImplicitAutograd_eigvals_linalg_eigh_out));
8680m.impl("linalg_eigvalsh",
8681TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_eigvalsh));
8682m.impl("linalg_eigvalsh.out",
8683TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_eigvalsh_out));
8684m.impl("linalg_inv",
8685TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_inv));
8686m.impl("linalg_inv.out",
8687TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_inv_out));
8688m.impl("inverse",
8689TORCH_FN(wrapper_CompositeImplicitAutograd__inverse));
8690m.impl("inverse.out",
8691TORCH_FN(wrapper_CompositeImplicitAutograd_out_inverse_out));
8692m.impl("inner",
8693TORCH_FN(wrapper_CompositeImplicitAutograd__inner));
8694m.impl("inner.out",
8695TORCH_FN(wrapper_CompositeImplicitAutograd_out_inner_out));
8696m.impl("outer",
8697TORCH_FN(wrapper_CompositeImplicitAutograd__outer));
8698m.impl("outer.out",
8699TORCH_FN(wrapper_CompositeImplicitAutograd_out_outer_out));
8700m.impl("ger",
8701TORCH_FN(wrapper_CompositeImplicitAutograd__ger));
8702m.impl("ger.out",
8703TORCH_FN(wrapper_CompositeImplicitAutograd_out_ger_out));
8704m.impl("linalg_norm",
8705TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_norm));
8706m.impl("linalg_norm.out",
8707TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_norm_out));
8708m.impl("linalg_norm.ord_str",
8709TORCH_FN(wrapper_CompositeImplicitAutograd_ord_str_linalg_norm));
8710m.impl("linalg_norm.ord_str_out",
8711TORCH_FN(wrapper_CompositeImplicitAutograd_ord_str_out_linalg_norm_out));
8712m.impl("linalg_matrix_norm",
8713TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_matrix_norm));
8714m.impl("linalg_matrix_norm.out",
8715TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_matrix_norm_out));
8716m.impl("linalg_matrix_norm.str_ord",
8717TORCH_FN(wrapper_CompositeImplicitAutograd_str_ord_linalg_matrix_norm));
8718m.impl("linalg_matrix_norm.str_ord_out",
8719TORCH_FN(wrapper_CompositeImplicitAutograd_str_ord_out_linalg_matrix_norm_out));
8720m.impl("linalg_svd",
8721TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_svd));
8722m.impl("linalg_svd.U",
8723TORCH_FN(wrapper_CompositeImplicitAutograd_U_linalg_svd_out));
8724m.impl("linalg_svdvals",
8725TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_svdvals));
8726m.impl("linalg_svdvals.out",
8727TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_svdvals_out));
8728m.impl("linalg_cond",
8729TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_cond));
8730m.impl("linalg_cond.out",
8731TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_cond_out));
8732m.impl("linalg_cond.p_str",
8733TORCH_FN(wrapper_CompositeImplicitAutograd_p_str_linalg_cond));
8734m.impl("linalg_cond.p_str_out",
8735TORCH_FN(wrapper_CompositeImplicitAutograd_p_str_out_linalg_cond_out));
8736m.impl("linalg_pinv.atol_rtol_float",
8737TORCH_FN(wrapper_CompositeImplicitAutograd_atol_rtol_float_linalg_pinv));
8738m.impl("linalg_pinv.atol_rtol_float_out",
8739TORCH_FN(wrapper_CompositeImplicitAutograd_atol_rtol_float_out_linalg_pinv_out));
8740m.impl("linalg_pinv",
8741TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_pinv));
8742m.impl("linalg_pinv.out",
8743TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_pinv_out));
8744m.impl("linalg_pinv.rcond_tensor",
8745TORCH_FN(wrapper_CompositeImplicitAutograd_rcond_tensor_linalg_pinv));
8746m.impl("linalg_pinv.out_rcond_tensor",
8747TORCH_FN(wrapper_CompositeImplicitAutograd_out_rcond_tensor_linalg_pinv_out));
8748m.impl("linalg_solve_ex",
8749TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_solve_ex));
8750m.impl("linalg_solve_ex.out",
8751TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_solve_ex_out));
8752m.impl("linalg_solve",
8753TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_solve));
8754m.impl("linalg_solve.out",
8755TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_solve_out));
8756m.impl("linalg_tensorinv",
8757TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_tensorinv));
8758m.impl("linalg_tensorinv.out",
8759TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_tensorinv_out));
8760m.impl("linalg_tensorsolve",
8761TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_tensorsolve));
8762m.impl("linalg_tensorsolve.out",
8763TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_tensorsolve_out));
8764m.impl("linalg_matrix_power",
8765TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_matrix_power));
8766m.impl("linalg_matrix_power.out",
8767TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_matrix_power_out));
8768m.impl("linalg_matrix_rank.atol_rtol_tensor",
8769TORCH_FN(wrapper_CompositeImplicitAutograd_atol_rtol_tensor_linalg_matrix_rank));
8770m.impl("linalg_matrix_rank.atol_rtol_tensor_out",
8771TORCH_FN(wrapper_CompositeImplicitAutograd_atol_rtol_tensor_out_linalg_matrix_rank_out));
8772m.impl("linalg_matrix_rank.atol_rtol_float",
8773TORCH_FN(wrapper_CompositeImplicitAutograd_atol_rtol_float_linalg_matrix_rank));
8774m.impl("linalg_matrix_rank.atol_rtol_float_out",
8775TORCH_FN(wrapper_CompositeImplicitAutograd_atol_rtol_float_out_linalg_matrix_rank_out));
8776m.impl("linalg_matrix_rank",
8777TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_matrix_rank));
8778m.impl("linalg_matrix_rank.out",
8779TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_matrix_rank_out));
8780m.impl("linalg_matrix_rank.tol_tensor",
8781TORCH_FN(wrapper_CompositeImplicitAutograd_tol_tensor_linalg_matrix_rank));
8782m.impl("linalg_matrix_rank.out_tol_tensor",
8783TORCH_FN(wrapper_CompositeImplicitAutograd_out_tol_tensor_linalg_matrix_rank_out));
8784m.impl("linalg_multi_dot",
8785TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_multi_dot));
8786m.impl("linalg_multi_dot.out",
8787TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_multi_dot_out));
8788m.impl("nested_to_padded_tensor",
8789TORCH_FN(wrapper_CompositeImplicitAutograd__nested_to_padded_tensor));
8790m.impl("_test_serialization_subcmul",
8791TORCH_FN(wrapper_CompositeImplicitAutograd___test_serialization_subcmul));
8792m.impl("_test_string_default",
8793TORCH_FN(wrapper_CompositeImplicitAutograd___test_string_default));
8794m.impl("_test_ambiguous_defaults.a",
8795TORCH_FN(wrapper_CompositeImplicitAutograd_a__test_ambiguous_defaults));
8796m.impl("_test_ambiguous_defaults.b",
8797TORCH_FN(wrapper_CompositeImplicitAutograd_b__test_ambiguous_defaults));
8798m.impl("_test_autograd_multiple_dispatch.ntonly",
8799TORCH_FN(wrapper_CompositeImplicitAutograd_ntonly__test_autograd_multiple_dispatch));
8800m.impl("pad_sequence",
8801TORCH_FN(wrapper_CompositeImplicitAutograd__pad_sequence));
8802m.impl("flatten_dense_tensors",
8803TORCH_FN(wrapper_CompositeImplicitAutograd__flatten_dense_tensors));
8804m.impl("unflatten_dense_tensors",
8805TORCH_FN(wrapper_CompositeImplicitAutograd__unflatten_dense_tensors));
8806m.impl("scaled_dot_product_attention",
8807TORCH_FN(wrapper_CompositeImplicitAutograd__scaled_dot_product_attention));
8808m.impl("_scaled_dot_product_attention",
8809TORCH_FN(wrapper_CompositeImplicitAutograd___scaled_dot_product_attention));
8810m.impl("_scaled_dot_product_attention_math",
8811TORCH_FN(wrapper_CompositeImplicitAutograd___scaled_dot_product_attention_math));
8812m.impl("special_chebyshev_polynomial_t.x_scalar",
8813TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_special_chebyshev_polynomial_t));
8814m.impl("special_chebyshev_polynomial_t.x_scalar_out",
8815TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_t_out));
8816m.impl("special_chebyshev_polynomial_t.n_scalar",
8817TORCH_FN(wrapper_CompositeImplicitAutograd_n_scalar_special_chebyshev_polynomial_t));
8818m.impl("special_chebyshev_polynomial_u.x_scalar",
8819TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_special_chebyshev_polynomial_u));
8820m.impl("special_chebyshev_polynomial_u.x_scalar_out",
8821TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_u_out));
8822m.impl("special_chebyshev_polynomial_u.n_scalar",
8823TORCH_FN(wrapper_CompositeImplicitAutograd_n_scalar_special_chebyshev_polynomial_u));
8824m.impl("special_chebyshev_polynomial_v.x_scalar",
8825TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_special_chebyshev_polynomial_v));
8826m.impl("special_chebyshev_polynomial_v.x_scalar_out",
8827TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_v_out));
8828m.impl("special_chebyshev_polynomial_v.n_scalar",
8829TORCH_FN(wrapper_CompositeImplicitAutograd_n_scalar_special_chebyshev_polynomial_v));
8830m.impl("special_chebyshev_polynomial_w.x_scalar",
8831TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_special_chebyshev_polynomial_w));
8832m.impl("special_chebyshev_polynomial_w.x_scalar_out",
8833TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_w_out));
8834m.impl("special_chebyshev_polynomial_w.n_scalar",
8835TORCH_FN(wrapper_CompositeImplicitAutograd_n_scalar_special_chebyshev_polynomial_w));
8836m.impl("special_hermite_polynomial_h.x_scalar",
8837TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_special_hermite_polynomial_h));
8838m.impl("special_hermite_polynomial_h.x_scalar_out",
8839TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_out_special_hermite_polynomial_h_out));
8840m.impl("special_hermite_polynomial_h.n_scalar",
8841TORCH_FN(wrapper_CompositeImplicitAutograd_n_scalar_special_hermite_polynomial_h));
8842m.impl("special_hermite_polynomial_he.x_scalar",
8843TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_special_hermite_polynomial_he));
8844m.impl("special_hermite_polynomial_he.x_scalar_out",
8845TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_out_special_hermite_polynomial_he_out));
8846m.impl("special_hermite_polynomial_he.n_scalar",
8847TORCH_FN(wrapper_CompositeImplicitAutograd_n_scalar_special_hermite_polynomial_he));
8848m.impl("special_laguerre_polynomial_l.x_scalar",
8849TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_special_laguerre_polynomial_l));
8850m.impl("special_laguerre_polynomial_l.x_scalar_out",
8851TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_out_special_laguerre_polynomial_l_out));
8852m.impl("special_laguerre_polynomial_l.n_scalar",
8853TORCH_FN(wrapper_CompositeImplicitAutograd_n_scalar_special_laguerre_polynomial_l));
8854m.impl("special_legendre_polynomial_p.x_scalar",
8855TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_special_legendre_polynomial_p));
8856m.impl("special_legendre_polynomial_p.x_scalar_out",
8857TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_out_special_legendre_polynomial_p_out));
8858m.impl("special_legendre_polynomial_p.n_scalar",
8859TORCH_FN(wrapper_CompositeImplicitAutograd_n_scalar_special_legendre_polynomial_p));
8860m.impl("special_shifted_chebyshev_polynomial_t.x_scalar",
8861TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_special_shifted_chebyshev_polynomial_t));
8862m.impl("special_shifted_chebyshev_polynomial_t.x_scalar_out",
8863TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_t_out));
8864m.impl("special_shifted_chebyshev_polynomial_t.n_scalar",
8865TORCH_FN(wrapper_CompositeImplicitAutograd_n_scalar_special_shifted_chebyshev_polynomial_t));
8866m.impl("special_shifted_chebyshev_polynomial_u.x_scalar",
8867TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_special_shifted_chebyshev_polynomial_u));
8868m.impl("special_shifted_chebyshev_polynomial_u.x_scalar_out",
8869TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_u_out));
8870m.impl("special_shifted_chebyshev_polynomial_u.n_scalar",
8871TORCH_FN(wrapper_CompositeImplicitAutograd_n_scalar_special_shifted_chebyshev_polynomial_u));
8872m.impl("special_shifted_chebyshev_polynomial_v.x_scalar",
8873TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_special_shifted_chebyshev_polynomial_v));
8874m.impl("special_shifted_chebyshev_polynomial_v.x_scalar_out",
8875TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_v_out));
8876m.impl("special_shifted_chebyshev_polynomial_v.n_scalar",
8877TORCH_FN(wrapper_CompositeImplicitAutograd_n_scalar_special_shifted_chebyshev_polynomial_v));
8878m.impl("special_shifted_chebyshev_polynomial_w.x_scalar",
8879TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_special_shifted_chebyshev_polynomial_w));
8880m.impl("special_shifted_chebyshev_polynomial_w.x_scalar_out",
8881TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_w_out));
8882m.impl("special_shifted_chebyshev_polynomial_w.n_scalar",
8883TORCH_FN(wrapper_CompositeImplicitAutograd_n_scalar_special_shifted_chebyshev_polynomial_w));
8884};
8885} // anonymous namespace
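// The functions below are the dispatch-key-qualified C++ entry points exposed
// as at::compositeimplicitautograd::* through the per-operator
// *_compositeimplicitautograd_dispatch.h headers included above. They forward
// directly to the wrappers in the anonymous namespace, skipping the
// dispatcher. A minimal, hypothetical usage sketch (`x` stands for any
// defined tensor; not part of this generated file):
//
//   at::Tensor x = at::rand({3});
//   // Equivalent to at::absolute(x), but pinned to this kernel:
//   at::Tensor y = at::compositeimplicitautograd::absolute(x);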
namespace compositeimplicitautograd {
at::Tensor _cast_Byte(const at::Tensor & self, bool non_blocking) {
    return wrapper_CompositeImplicitAutograd___cast_Byte(self, non_blocking);
}
at::Tensor _cast_Char(const at::Tensor & self, bool non_blocking) {
    return wrapper_CompositeImplicitAutograd___cast_Char(self, non_blocking);
}
at::Tensor _cast_Double(const at::Tensor & self, bool non_blocking) {
    return wrapper_CompositeImplicitAutograd___cast_Double(self, non_blocking);
}
at::Tensor _cast_Float(const at::Tensor & self, bool non_blocking) {
    return wrapper_CompositeImplicitAutograd___cast_Float(self, non_blocking);
}
at::Tensor _cast_Int(const at::Tensor & self, bool non_blocking) {
    return wrapper_CompositeImplicitAutograd___cast_Int(self, non_blocking);
}
at::Tensor _cast_Long(const at::Tensor & self, bool non_blocking) {
    return wrapper_CompositeImplicitAutograd___cast_Long(self, non_blocking);
}
at::Tensor _cast_Short(const at::Tensor & self, bool non_blocking) {
    return wrapper_CompositeImplicitAutograd___cast_Short(self, non_blocking);
}
at::Tensor _cast_Half(const at::Tensor & self, bool non_blocking) {
    return wrapper_CompositeImplicitAutograd___cast_Half(self, non_blocking);
}
void _backward(const at::Tensor & self, at::TensorList inputs, const c10::optional<at::Tensor> & gradient, c10::optional<bool> retain_graph, bool create_graph) {
    return wrapper_CompositeImplicitAutograd___backward(self, inputs, gradient, retain_graph, create_graph);
}
void set_data(at::Tensor & self, const at::Tensor & new_data) {
    return wrapper_CompositeImplicitAutograd__set_data(self, new_data);
}
at::Tensor data(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__data(self);
}
bool is_leaf(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__is_leaf(self);
}
int64_t output_nr(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__output_nr(self);
}
int64_t _version(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd___version(self);
}
at::Tensor & requires_grad_(at::Tensor & self, bool requires_grad) {
    return wrapper_CompositeImplicitAutograd__requires_grad_(self, requires_grad);
}
void retain_grad(at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__retain_grad(self);
}
bool retains_grad(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__retains_grad(self);
}
::std::tuple<at::Tensor,at::Tensor> _unpack_dual(const at::Tensor & dual, int64_t level) {
    return wrapper_CompositeImplicitAutograd___unpack_dual(dual, level);
}
at::Tensor & rename_(at::Tensor & self, c10::optional<at::DimnameList> names) {
    return wrapper_CompositeImplicitAutograd__rename_(self, names);
}
at::Tensor rename(const at::Tensor & self, c10::optional<at::DimnameList> names) {
    return wrapper_CompositeImplicitAutograd__rename(self, names);
}
at::Tensor align_to(const at::Tensor & self, at::DimnameList names) {
    return wrapper_CompositeImplicitAutograd__align_to(self, names);
}
at::Tensor align_to(const at::Tensor & self, at::DimnameList order, int64_t ellipsis_idx) {
    return wrapper_CompositeImplicitAutograd_ellipsis_idx_align_to(self, order, ellipsis_idx);
}
at::Tensor align_as(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd__align_as(self, other);
}
::std::vector<at::Tensor> align_tensors(at::TensorList tensors) {
    return wrapper_CompositeImplicitAutograd__align_tensors(tensors);
}
void _assert_tensor_metadata(const at::Tensor & a, at::OptionalIntArrayRef size, at::OptionalIntArrayRef stride, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd___assert_tensor_metadata(a, size, stride, dtype);
}
at::Tensor refine_names(const at::Tensor & self, at::DimnameList names) {
    return wrapper_CompositeImplicitAutograd__refine_names(self, names);
}
bool _use_cudnn_rnn_flatten_weight() {
    return wrapper_CompositeImplicitAutograd___use_cudnn_rnn_flatten_weight();
}
int64_t _debug_has_internal_overlap(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd___debug_has_internal_overlap(self);
}
::std::tuple<at::Tensor,at::Tensor> _sobol_engine_draw(const at::Tensor & quasi, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd___sobol_engine_draw(quasi, n, sobolstate, dimension, num_generated, dtype);
}
at::Tensor & _sobol_engine_ff_(at::Tensor & self, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated) {
    return wrapper_CompositeImplicitAutograd___sobol_engine_ff_(self, n, sobolstate, dimension, num_generated);
}
at::Tensor & _sobol_engine_scramble_(at::Tensor & self, const at::Tensor & ltm, int64_t dimension) {
    return wrapper_CompositeImplicitAutograd___sobol_engine_scramble_(self, ltm, dimension);
}
at::Tensor & _sobol_engine_initialize_state_(at::Tensor & self, int64_t dimension) {
    return wrapper_CompositeImplicitAutograd___sobol_engine_initialize_state_(self, dimension);
}
at::Tensor _reshape_from_tensor(const at::Tensor & self, const at::Tensor & shape) {
    return wrapper_CompositeImplicitAutograd___reshape_from_tensor(self, shape);
}
at::Tensor _shape_as_tensor(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd___shape_as_tensor(self);
}
at::Tensor dropout(const at::Tensor & input, double p, bool train) {
    return wrapper_CompositeImplicitAutograd__dropout(input, p, train);
}
at::Tensor & dropout_(at::Tensor & self, double p, bool train) {
    return wrapper_CompositeImplicitAutograd__dropout_(self, p, train);
}
at::Tensor feature_dropout(const at::Tensor & input, double p, bool train) {
    return wrapper_CompositeImplicitAutograd__feature_dropout(input, p, train);
}
at::Tensor & feature_dropout_(at::Tensor & self, double p, bool train) {
    return wrapper_CompositeImplicitAutograd__feature_dropout_(self, p, train);
}
at::Tensor alpha_dropout(const at::Tensor & input, double p, bool train) {
    return wrapper_CompositeImplicitAutograd__alpha_dropout(input, p, train);
}
at::Tensor & alpha_dropout_(at::Tensor & self, double p, bool train) {
    return wrapper_CompositeImplicitAutograd__alpha_dropout_(self, p, train);
}
at::Tensor feature_alpha_dropout(const at::Tensor & input, double p, bool train) {
    return wrapper_CompositeImplicitAutograd__feature_alpha_dropout(input, p, train);
}
at::Tensor & feature_alpha_dropout_(at::Tensor & self, double p, bool train) {
    return wrapper_CompositeImplicitAutograd__feature_alpha_dropout_(self, p, train);
}
at::Tensor absolute(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__absolute(self);
}
at::Tensor & absolute_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd_out_absolute_out(self, out);
}
at::Tensor & absolute_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_absolute_out(self, out);
}
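// Note: codegen emits two spellings for every out= overload. The `*_out`
// variant takes the output tensor first (the C++-convenience order), while
// the `*_outf` variant keeps the native schema order with `out` last; both
// forward to the same wrapper, as absolute_out/absolute_outf above show.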
9022at::Tensor & absolute_(at::Tensor & self) {
9023return wrapper_CompositeImplicitAutograd__absolute_(self);
9024}
9025at::Tensor chalf(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
9026return wrapper_CompositeImplicitAutograd__chalf(self, memory_format);
9027}
9028at::Tensor real(const at::Tensor & self) {
9029return wrapper_CompositeImplicitAutograd__real(self);
9030}
9031at::Tensor imag(const at::Tensor & self) {
9032return wrapper_CompositeImplicitAutograd__imag(self);
9033}
9034at::Tensor conj(const at::Tensor & self) {
9035return wrapper_CompositeImplicitAutograd__conj(self);
9036}
9037at::Tensor conj_physical(const at::Tensor & self) {
9038return wrapper_CompositeImplicitAutograd__conj_physical(self);
9039}
9040at::Tensor resolve_conj(const at::Tensor & self) {
9041return wrapper_CompositeImplicitAutograd__resolve_conj(self);
9042}
9043at::Tensor resolve_neg(const at::Tensor & self) {
9044return wrapper_CompositeImplicitAutograd__resolve_neg(self);
9045}
9046at::Tensor arccos(const at::Tensor & self) {
9047return wrapper_CompositeImplicitAutograd__arccos(self);
9048}
9049at::Tensor & arccos_out(at::Tensor & out, const at::Tensor & self) {
9050return wrapper_CompositeImplicitAutograd_out_arccos_out(self, out);
9051}
9052at::Tensor & arccos_outf(const at::Tensor & self, at::Tensor & out) {
9053return wrapper_CompositeImplicitAutograd_out_arccos_out(self, out);
9054}
9055at::Tensor & arccos_(at::Tensor & self) {
9056return wrapper_CompositeImplicitAutograd__arccos_(self);
9057}
9058at::Tensor avg_pool1d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad) {
9059return wrapper_CompositeImplicitAutograd__avg_pool1d(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
9060}
9061at::Tensor adaptive_avg_pool1d(const at::Tensor & self, at::IntArrayRef output_size) {
9062return wrapper_CompositeImplicitAutograd__adaptive_avg_pool1d(self, output_size);
9063}
9064::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool1d(const at::Tensor & self, at::IntArrayRef output_size) {
9065return wrapper_CompositeImplicitAutograd__adaptive_max_pool1d(self, output_size);
9066}
9067at::Tensor affine_grid_generator_backward(const at::Tensor & grad, at::IntArrayRef size, bool align_corners) {
9068return wrapper_CompositeImplicitAutograd__affine_grid_generator_backward(grad, size, align_corners);
9069}
9070at::Tensor _test_check_tensor(const at::Tensor & self) {
9071return wrapper_CompositeImplicitAutograd___test_check_tensor(self);
9072}
9073at::Tensor all(const at::Tensor & self, at::Dimname dim, bool keepdim) {
9074return wrapper_CompositeImplicitAutograd_dimname_all(self, dim, keepdim);
9075}
9076at::Tensor & all_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, bool keepdim) {
9077return wrapper_CompositeImplicitAutograd_dimname_out_all_out(self, dim, keepdim, out);
9078}
9079at::Tensor & all_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out) {
9080return wrapper_CompositeImplicitAutograd_dimname_out_all_out(self, dim, keepdim, out);
9081}
9082at::Tensor any(const at::Tensor & self, at::Dimname dim, bool keepdim) {
9083return wrapper_CompositeImplicitAutograd_dimname_any(self, dim, keepdim);
9084}
9085at::Tensor & any_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, bool keepdim) {
9086return wrapper_CompositeImplicitAutograd_dimname_out_any_out(self, dim, keepdim, out);
9087}
9088at::Tensor & any_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out) {
9089return wrapper_CompositeImplicitAutograd_dimname_out_any_out(self, dim, keepdim, out);
9090}
9091at::Tensor _dim_arange(const at::Tensor & like, int64_t dim) {
9092return wrapper_CompositeImplicitAutograd___dim_arange(like, dim);
9093}
9094at::Tensor arccosh(const at::Tensor & self) {
9095return wrapper_CompositeImplicitAutograd__arccosh(self);
9096}
9097at::Tensor & arccosh_out(at::Tensor & out, const at::Tensor & self) {
9098return wrapper_CompositeImplicitAutograd_out_arccosh_out(self, out);
9099}
9100at::Tensor & arccosh_outf(const at::Tensor & self, at::Tensor & out) {
9101return wrapper_CompositeImplicitAutograd_out_arccosh_out(self, out);
9102}
9103at::Tensor & arccosh_(at::Tensor & self) {
9104return wrapper_CompositeImplicitAutograd__arccosh_(self);
9105}
9106at::Tensor arcsinh(const at::Tensor & self) {
9107return wrapper_CompositeImplicitAutograd__arcsinh(self);
9108}
9109at::Tensor & arcsinh_out(at::Tensor & out, const at::Tensor & self) {
9110return wrapper_CompositeImplicitAutograd_out_arcsinh_out(self, out);
9111}
9112at::Tensor & arcsinh_outf(const at::Tensor & self, at::Tensor & out) {
9113return wrapper_CompositeImplicitAutograd_out_arcsinh_out(self, out);
9114}
9115at::Tensor & arcsinh_(at::Tensor & self) {
9116return wrapper_CompositeImplicitAutograd__arcsinh_(self);
9117}
9118at::Tensor arctanh(const at::Tensor & self) {
9119return wrapper_CompositeImplicitAutograd__arctanh(self);
9120}
9121at::Tensor & arctanh_out(at::Tensor & out, const at::Tensor & self) {
9122return wrapper_CompositeImplicitAutograd_out_arctanh_out(self, out);
9123}
9124at::Tensor & arctanh_outf(const at::Tensor & self, at::Tensor & out) {
9125return wrapper_CompositeImplicitAutograd_out_arctanh_out(self, out);
9126}
9127at::Tensor & arctanh_(at::Tensor & self) {
9128return wrapper_CompositeImplicitAutograd__arctanh_(self);
9129}
9130at::Tensor arcsin(const at::Tensor & self) {
9131return wrapper_CompositeImplicitAutograd__arcsin(self);
9132}
9133at::Tensor & arcsin_out(at::Tensor & out, const at::Tensor & self) {
9134return wrapper_CompositeImplicitAutograd_out_arcsin_out(self, out);
9135}
9136at::Tensor & arcsin_outf(const at::Tensor & self, at::Tensor & out) {
9137return wrapper_CompositeImplicitAutograd_out_arcsin_out(self, out);
9138}
9139at::Tensor & arcsin_(at::Tensor & self) {
9140return wrapper_CompositeImplicitAutograd__arcsin_(self);
9141}
9142at::Tensor arctan(const at::Tensor & self) {
9143return wrapper_CompositeImplicitAutograd__arctan(self);
9144}
9145at::Tensor & arctan_out(at::Tensor & out, const at::Tensor & self) {
9146return wrapper_CompositeImplicitAutograd_out_arctan_out(self, out);
9147}
9148at::Tensor & arctan_outf(const at::Tensor & self, at::Tensor & out) {
9149return wrapper_CompositeImplicitAutograd_out_arctan_out(self, out);
9150}
9151at::Tensor & arctan_(at::Tensor & self) {
9152return wrapper_CompositeImplicitAutograd__arctan_(self);
9153}
9154at::Tensor atleast_1d(const at::Tensor & self) {
9155return wrapper_CompositeImplicitAutograd__atleast_1d(self);
9156}
9157::std::vector<at::Tensor> atleast_1d(at::TensorList tensors) {
9158return wrapper_CompositeImplicitAutograd_Sequence_atleast_1d(tensors);
9159}
9160at::Tensor atleast_2d(const at::Tensor & self) {
9161return wrapper_CompositeImplicitAutograd__atleast_2d(self);
9162}
9163::std::vector<at::Tensor> atleast_2d(at::TensorList tensors) {
9164return wrapper_CompositeImplicitAutograd_Sequence_atleast_2d(tensors);
9165}
9166at::Tensor atleast_3d(const at::Tensor & self) {
9167return wrapper_CompositeImplicitAutograd__atleast_3d(self);
9168}
9169::std::vector<at::Tensor> atleast_3d(at::TensorList tensors) {
9170return wrapper_CompositeImplicitAutograd_Sequence_atleast_3d(tensors);
9171}
9172at::Tensor batch_norm(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled) {
9173return wrapper_CompositeImplicitAutograd__batch_norm(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled);
9174}
9175::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,int64_t> _batch_norm_impl_index(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled) {
9176return wrapper_CompositeImplicitAutograd___batch_norm_impl_index(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled);
9177}
9178::std::tuple<at::Tensor,at::Tensor,at::Tensor> _batch_norm_impl_index_backward(int64_t impl_index, const at::Tensor & input, const at::Tensor & grad_output, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var_transform, bool train, double eps, ::std::array<bool,3> output_mask, const at::Tensor & reservedSpace) {
9179return wrapper_CompositeImplicitAutograd___batch_norm_impl_index_backward(impl_index, input, grad_output, weight, running_mean, running_var, save_mean, save_var_transform, train, eps, output_mask, reservedSpace);
9180}
9181at::Tensor bilinear(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const c10::optional<at::Tensor> & bias) {
9182return wrapper_CompositeImplicitAutograd__bilinear(input1, input2, weight, bias);
9183}
9184::std::vector<at::Tensor> broadcast_tensors(at::TensorList tensors) {
9185return wrapper_CompositeImplicitAutograd__broadcast_tensors(tensors);
9186}
9187at::Tensor broadcast_to(const at::Tensor & self, at::IntArrayRef size) {
9188return wrapper_CompositeImplicitAutograd__broadcast_to(self, c10::fromIntArrayRefSlow(size));
9189}
9190at::Tensor broadcast_to_symint(const at::Tensor & self, c10::SymIntArrayRef size) {
9191return wrapper_CompositeImplicitAutograd__broadcast_to(self, size);
9192}
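// Note: the IntArrayRef overload of broadcast_to above widens its sizes with
// c10::fromIntArrayRefSlow so that the concrete-int and symbolic-int
// (SymIntArrayRef) entry points can share the single SymInt-based wrapper.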
at::Tensor cat(at::TensorList tensors, at::Dimname dim) {
    return wrapper_CompositeImplicitAutograd_names_cat(tensors, dim);
}
at::Tensor & cat_out(at::Tensor & out, at::TensorList tensors, at::Dimname dim) {
    return wrapper_CompositeImplicitAutograd_names_out_cat_out(tensors, dim, out);
}
at::Tensor & cat_outf(at::TensorList tensors, at::Dimname dim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_names_out_cat_out(tensors, dim, out);
}
at::Tensor concat(at::TensorList tensors, int64_t dim) {
    return wrapper_CompositeImplicitAutograd__concat(tensors, dim);
}
at::Tensor & concat_out(at::Tensor & out, at::TensorList tensors, int64_t dim) {
    return wrapper_CompositeImplicitAutograd_out_concat_out(tensors, dim, out);
}
at::Tensor & concat_outf(at::TensorList tensors, int64_t dim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_concat_out(tensors, dim, out);
}
at::Tensor concat(at::TensorList tensors, at::Dimname dim) {
    return wrapper_CompositeImplicitAutograd_names_concat(tensors, dim);
}
at::Tensor & concat_out(at::Tensor & out, at::TensorList tensors, at::Dimname dim) {
    return wrapper_CompositeImplicitAutograd_names_out_concat_out(tensors, dim, out);
}
at::Tensor & concat_outf(at::TensorList tensors, at::Dimname dim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_names_out_concat_out(tensors, dim, out);
}
at::Tensor concatenate(at::TensorList tensors, int64_t dim) {
    return wrapper_CompositeImplicitAutograd__concatenate(tensors, dim);
}
at::Tensor & concatenate_out(at::Tensor & out, at::TensorList tensors, int64_t dim) {
    return wrapper_CompositeImplicitAutograd_out_concatenate_out(tensors, dim, out);
}
at::Tensor & concatenate_outf(at::TensorList tensors, int64_t dim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_concatenate_out(tensors, dim, out);
}
at::Tensor concatenate(at::TensorList tensors, at::Dimname dim) {
    return wrapper_CompositeImplicitAutograd_names_concatenate(tensors, dim);
}
at::Tensor & concatenate_out(at::Tensor & out, at::TensorList tensors, at::Dimname dim) {
    return wrapper_CompositeImplicitAutograd_names_out_concatenate_out(tensors, dim, out);
}
at::Tensor & concatenate_outf(at::TensorList tensors, at::Dimname dim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_names_out_concatenate_out(tensors, dim, out);
}
at::Tensor chain_matmul(at::TensorList matrices) {
    return wrapper_CompositeImplicitAutograd__chain_matmul(matrices);
}
at::Tensor & chain_matmul_out(at::Tensor & out, at::TensorList matrices) {
    return wrapper_CompositeImplicitAutograd_out_chain_matmul_out(matrices, out);
}
at::Tensor & chain_matmul_outf(at::TensorList matrices, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_chain_matmul_out(matrices, out);
}
::std::vector<at::Tensor> unsafe_chunk(const at::Tensor & self, int64_t chunks, int64_t dim) {
    return wrapper_CompositeImplicitAutograd__unsafe_chunk(self, chunks, dim);
}
::std::vector<at::Tensor> chunk(const at::Tensor & self, int64_t chunks, int64_t dim) {
    return wrapper_CompositeImplicitAutograd__chunk(self, chunks, dim);
}
::std::vector<at::Tensor> tensor_split(const at::Tensor & self, int64_t sections, int64_t dim) {
    return wrapper_CompositeImplicitAutograd_sections_tensor_split(self, sections, dim);
}
::std::vector<at::Tensor> tensor_split_symint(const at::Tensor & self, c10::SymInt sections, int64_t dim) {
    return wrapper_CompositeImplicitAutograd_sections_tensor_split(self, sections, dim);
}
::std::vector<at::Tensor> tensor_split(const at::Tensor & self, at::IntArrayRef indices, int64_t dim) {
    return wrapper_CompositeImplicitAutograd_indices_tensor_split(self, c10::fromIntArrayRefSlow(indices), dim);
}
::std::vector<at::Tensor> tensor_split_symint(const at::Tensor & self, c10::SymIntArrayRef indices, int64_t dim) {
    return wrapper_CompositeImplicitAutograd_indices_tensor_split(self, indices, dim);
}
::std::vector<at::Tensor> tensor_split(const at::Tensor & self, const at::Tensor & tensor_indices_or_sections, int64_t dim) {
    return wrapper_CompositeImplicitAutograd_tensor_indices_or_sections_tensor_split(self, tensor_indices_or_sections, dim);
}
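// Note: tensor_split has three schema overloads (an integer section count,
// explicit split indices, and a tensor holding either), each routed to its
// own wrapper; the *_symint variants accept c10::SymInt values so the same
// entry points also work under symbolic-shape tracing.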
at::Tensor clip(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
    return wrapper_CompositeImplicitAutograd__clip(self, min, max);
}
at::Tensor & clip_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
    return wrapper_CompositeImplicitAutograd_out_clip_out(self, min, max, out);
}
at::Tensor & clip_outf(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_clip_out(self, min, max, out);
}
at::Tensor & clip_(at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
    return wrapper_CompositeImplicitAutograd__clip_(self, min, max);
}
at::Tensor clip(const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
    return wrapper_CompositeImplicitAutograd_Tensor_clip(self, min, max);
}
at::Tensor & clip_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
    return wrapper_CompositeImplicitAutograd_Tensor_out_clip_out(self, min, max, out);
}
at::Tensor & clip_outf(const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_Tensor_out_clip_out(self, min, max, out);
}
at::Tensor & clip_(at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
    return wrapper_CompositeImplicitAutograd_Tensor_clip_(self, min, max);
}
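// NOTE: clip comes in two families above, Scalar bounds and Tensor bounds,
// each with out/in-place variants; passing c10::nullopt for one bound clamps
// on the other side only. Illustrative sketch (values hypothetical):
//   at::Tensor t = at::randn({4});
//   at::Tensor lo = at::clip(t, 0.0, c10::nullopt);  // lower bound only
//   at::Tensor both = at::clip(t, -1.0, 1.0);        // clamp to [-1, 1]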
bool cudnn_is_acceptable(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__cudnn_is_acceptable(self);
}
at::Tensor contiguous(const at::Tensor & self, at::MemoryFormat memory_format) {
    return wrapper_CompositeImplicitAutograd__contiguous(self, memory_format);
}
at::Tensor _convolution(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled) {
    return wrapper_CompositeImplicitAutograd_deprecated__convolution(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled);
}
at::Tensor _convolution_mode(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
    return wrapper_CompositeImplicitAutograd___convolution_mode(input, weight, bias, stride, padding, dilation, groups);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _convolution_double_backward(const c10::optional<at::Tensor> & ggI, const c10::optional<at::Tensor> & ggW, const c10::optional<at::Tensor> & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
    return wrapper_CompositeImplicitAutograd___convolution_double_backward(ggI, ggW, ggb, gO, weight, self, stride, c10::fromIntArrayRefSlow(padding), dilation, transposed, c10::fromIntArrayRefSlow(output_padding), groups, output_mask);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _convolution_double_backward_symint(const c10::optional<at::Tensor> & ggI, const c10::optional<at::Tensor> & ggW, const c10::optional<at::Tensor> & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
    return wrapper_CompositeImplicitAutograd___convolution_double_backward(ggI, ggW, ggb, gO, weight, self, stride, padding, dilation, transposed, output_padding, groups, output_mask);
}
at::Tensor conv1d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
    return wrapper_CompositeImplicitAutograd__conv1d(input, weight, bias, stride, padding, dilation, groups);
}
at::Tensor conv2d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
    return wrapper_CompositeImplicitAutograd__conv2d(input, weight, bias, stride, padding, dilation, groups);
}
at::Tensor conv3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
    return wrapper_CompositeImplicitAutograd__conv3d(input, weight, bias, stride, padding, dilation, groups);
}
at::Tensor conv1d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
    return wrapper_CompositeImplicitAutograd_padding_conv1d(input, weight, bias, stride, padding, dilation, groups);
}
at::Tensor conv2d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
    return wrapper_CompositeImplicitAutograd_padding_conv2d(input, weight, bias, stride, padding, dilation, groups);
}
at::Tensor conv3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
    return wrapper_CompositeImplicitAutograd_padding_conv3d(input, weight, bias, stride, padding, dilation, groups);
}
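// NOTE: conv1d/conv2d/conv3d each appear twice above: once with integer
// padding and once with a string padding mode, where "valid" means no padding
// and "same" preserves the spatial size (stride 1 only). Hedged sketch
// (shapes hypothetical):
//   at::Tensor x = at::randn({1, 3, 8, 8});
//   at::Tensor w = at::randn({6, 3, 3, 3});
//   at::Tensor y = at::conv2d(x, w, /*bias=*/{}, /*stride=*/1,
//                             /*padding=*/"same", /*dilation=*/1, /*groups=*/1);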
::std::tuple<at::Tensor,at::Tensor,at::Tensor> conv_tbc_backward(const at::Tensor & self, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) {
    return wrapper_CompositeImplicitAutograd__conv_tbc_backward(self, input, weight, bias, pad);
}
at::Tensor conv_transpose1d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation) {
    return wrapper_CompositeImplicitAutograd__conv_transpose1d(input, weight, bias, stride, padding, output_padding, groups, dilation);
}
at::Tensor conv_transpose2d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation) {
    return wrapper_CompositeImplicitAutograd_input_conv_transpose2d(input, weight, bias, stride, padding, output_padding, groups, dilation);
}
at::Tensor conv_transpose3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation) {
    return wrapper_CompositeImplicitAutograd_input_conv_transpose3d(input, weight, bias, stride, padding, output_padding, groups, dilation);
}
at::Tensor cosine_embedding_loss(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) {
    return wrapper_CompositeImplicitAutograd__cosine_embedding_loss(input1, input2, target, margin, reduction);
}
at::Tensor cov(const at::Tensor & self, int64_t correction, const c10::optional<at::Tensor> & fweights, const c10::optional<at::Tensor> & aweights) {
    return wrapper_CompositeImplicitAutograd__cov(self, correction, fweights, aweights);
}
at::Tensor corrcoef(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__corrcoef(self);
}
::std::tuple<at::Tensor,at::Tensor> cummax(const at::Tensor & self, at::Dimname dim) {
    return wrapper_CompositeImplicitAutograd_dimname_cummax(self, dim);
}
::std::tuple<at::Tensor &,at::Tensor &> cummax_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim) {
    return wrapper_CompositeImplicitAutograd_dimname_out_cummax_out(self, dim, values, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> cummax_outf(const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices) {
    return wrapper_CompositeImplicitAutograd_dimname_out_cummax_out(self, dim, values, indices);
}
::std::tuple<at::Tensor,at::Tensor> cummin(const at::Tensor & self, at::Dimname dim) {
    return wrapper_CompositeImplicitAutograd_dimname_cummin(self, dim);
}
::std::tuple<at::Tensor &,at::Tensor &> cummin_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim) {
    return wrapper_CompositeImplicitAutograd_dimname_out_cummin_out(self, dim, values, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> cummin_outf(const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices) {
    return wrapper_CompositeImplicitAutograd_dimname_out_cummin_out(self, dim, values, indices);
}
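// NOTE: the `dimname` overloads here (and throughout this file) take an
// at::Dimname in place of an integer axis; the composite kernels behind them
// resolve the name to a position on the named tensor and forward to the
// positional overloads, so they add no numeric behavior of their own.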
at::Tensor cummaxmin_backward(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & indices, int64_t dim) {
    return wrapper_CompositeImplicitAutograd__cummaxmin_backward(grad, input, indices, dim);
}
at::Tensor cumprod(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_dimname_cumprod(self, dim, dtype);
}
at::Tensor & cumprod_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_dimname_out_cumprod_out(self, dim, dtype, out);
}
at::Tensor & cumprod_outf(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_dimname_out_cumprod_out(self, dim, dtype, out);
}
at::Tensor & cumprod_(at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_dimname_cumprod_(self, dim, dtype);
}
at::Tensor cumprod_backward(const at::Tensor & grad, const at::Tensor & input, int64_t dim, const at::Tensor & output) {
    return wrapper_CompositeImplicitAutograd__cumprod_backward(grad, input, dim, output);
}
at::Tensor cumsum(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_dimname_cumsum(self, dim, dtype);
}
at::Tensor & cumsum_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_dimname_out_cumsum_out(self, dim, dtype, out);
}
at::Tensor & cumsum_outf(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_dimname_out_cumsum_out(self, dim, dtype, out);
}
at::Tensor & cumsum_(at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_dimname_cumsum_(self, dim, dtype);
}
at::Tensor cumulative_trapezoid(const at::Tensor & y, const at::Tensor & x, int64_t dim) {
    return wrapper_CompositeImplicitAutograd_x_cumulative_trapezoid(y, x, dim);
}
at::Tensor cumulative_trapezoid(const at::Tensor & y, const at::Scalar & dx, int64_t dim) {
    return wrapper_CompositeImplicitAutograd_dx_cumulative_trapezoid(y, dx, dim);
}
at::Tensor ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) {
    return wrapper_CompositeImplicitAutograd_IntList_ctc_loss(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
}
at::Tensor ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) {
    return wrapper_CompositeImplicitAutograd_Tensor_ctc_loss(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
}
at::Tensor diagflat(const at::Tensor & self, int64_t offset) {
    return wrapper_CompositeImplicitAutograd__diagflat(self, offset);
}
at::Tensor linalg_diagonal(const at::Tensor & A, int64_t offset, int64_t dim1, int64_t dim2) {
    return wrapper_CompositeImplicitAutograd__linalg_diagonal(A, offset, dim1, dim2);
}
at::Tensor diagonal(const at::Tensor & self, at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset) {
    return wrapper_CompositeImplicitAutograd_Dimname_diagonal(self, outdim, dim1, dim2, offset);
}
at::Tensor & fill_diagonal_(at::Tensor & self, const at::Scalar & fill_value, bool wrap) {
    return wrapper_CompositeImplicitAutograd__fill_diagonal_(self, fill_value, wrap);
}
at::Tensor diff(const at::Tensor & self, int64_t n, int64_t dim, const c10::optional<at::Tensor> & prepend, const c10::optional<at::Tensor> & append) {
    return wrapper_CompositeImplicitAutograd__diff(self, n, dim, prepend, append);
}
at::Tensor & diff_out(at::Tensor & out, const at::Tensor & self, int64_t n, int64_t dim, const c10::optional<at::Tensor> & prepend, const c10::optional<at::Tensor> & append) {
    return wrapper_CompositeImplicitAutograd_out_diff_out(self, n, dim, prepend, append, out);
}
at::Tensor & diff_outf(const at::Tensor & self, int64_t n, int64_t dim, const c10::optional<at::Tensor> & prepend, const c10::optional<at::Tensor> & append, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_diff_out(self, n, dim, prepend, append, out);
}
::std::vector<at::Tensor> gradient(const at::Tensor & self, const c10::optional<at::Scalar> & spacing, c10::optional<int64_t> dim, int64_t edge_order) {
    return wrapper_CompositeImplicitAutograd_scalarint_gradient(self, spacing, dim, edge_order);
}
::std::vector<at::Tensor> gradient(const at::Tensor & self, const at::Scalar & spacing, at::IntArrayRef dim, int64_t edge_order) {
    return wrapper_CompositeImplicitAutograd_scalararray_gradient(self, spacing, dim, edge_order);
}
::std::vector<at::Tensor> gradient(const at::Tensor & self, at::IntArrayRef dim, int64_t edge_order) {
    return wrapper_CompositeImplicitAutograd_array_gradient(self, dim, edge_order);
}
::std::vector<at::Tensor> gradient(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, c10::optional<int64_t> dim, int64_t edge_order) {
    return wrapper_CompositeImplicitAutograd_scalarrayint_gradient(self, spacing, dim, edge_order);
}
::std::vector<at::Tensor> gradient(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, at::IntArrayRef dim, int64_t edge_order) {
    return wrapper_CompositeImplicitAutograd_scalarrayarray_gradient(self, spacing, dim, edge_order);
}
::std::vector<at::Tensor> gradient(const at::Tensor & self, at::TensorList spacing, c10::optional<int64_t> dim, int64_t edge_order) {
    return wrapper_CompositeImplicitAutograd_tensorarrayint_gradient(self, spacing, dim, edge_order);
}
::std::vector<at::Tensor> gradient(const at::Tensor & self, at::TensorList spacing, at::IntArrayRef dim, int64_t edge_order) {
    return wrapper_CompositeImplicitAutograd_tensorarray_gradient(self, spacing, dim, edge_order);
}
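// NOTE: the seven gradient overloads above differ only in how spacing and dim
// are spelled (single Scalar vs. Scalar list vs. Tensor list for spacing;
// optional single dim vs. dim list); the wrapper suffixes (scalarint,
// scalararray, array, scalarrayint, scalarrayarray, tensorarrayint,
// tensorarray) record which pairing each entry registers.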
at::Tensor divide(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor_divide(self, other);
}
at::Tensor & divide_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_out_divide_out(self, other, out);
}
at::Tensor & divide_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_divide_out(self, other, out);
}
at::Tensor & divide_(at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor_divide_(self, other);
}
at::Tensor divide(const at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_divide(self, other);
}
at::Tensor & divide_(at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_divide_(self, other);
}
at::Tensor divide(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
    return wrapper_CompositeImplicitAutograd_Tensor_mode_divide(self, other, rounding_mode);
}
at::Tensor & divide_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
    return wrapper_CompositeImplicitAutograd_out_mode_divide_out(self, other, rounding_mode, out);
}
at::Tensor & divide_outf(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_mode_divide_out(self, other, rounding_mode, out);
}
at::Tensor & divide_(at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
    return wrapper_CompositeImplicitAutograd_Tensor_mode_divide_(self, other, rounding_mode);
}
at::Tensor divide(const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
    return wrapper_CompositeImplicitAutograd_Scalar_mode_divide(self, other, rounding_mode);
}
at::Tensor & divide_(at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
    return wrapper_CompositeImplicitAutograd_Scalar_mode_divide_(self, other, rounding_mode);
}
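// NOTE: the three-argument divide variants take an optional rounding_mode:
// c10::nullopt is true division, "trunc" rounds toward zero, and "floor"
// rounds toward negative infinity (matching Python's //). Sketch (tensors
// hypothetical):
//   at::Tensor a = at::full({1}, 7.0), b = at::full({1}, 2.0);
//   at::Tensor q = at::divide(a, b, "floor");  // -> 3.0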
at::Tensor true_divide(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor_true_divide(self, other);
}
at::Tensor & true_divide_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_out_true_divide_out(self, other, out);
}
at::Tensor & true_divide_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_true_divide_out(self, other, out);
}
at::Tensor & true_divide_(at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor_true_divide_(self, other);
}
at::Tensor true_divide(const at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_true_divide(self, other);
}
at::Tensor & true_divide_(at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_true_divide_(self, other);
}
at::Tensor einsum(c10::string_view equation, at::TensorList tensors, at::OptionalIntArrayRef path) {
    return wrapper_CompositeImplicitAutograd__einsum(equation, tensors, path);
}
at::Tensor embedding_backward(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) {
    return wrapper_CompositeImplicitAutograd__embedding_backward(grad, indices, num_weights, padding_idx, scale_grad_by_freq, sparse);
}
at::Tensor embedding_backward_symint(const at::Tensor & grad, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) {
    return wrapper_CompositeImplicitAutograd__embedding_backward(grad, indices, num_weights, padding_idx, scale_grad_by_freq, sparse);
}
at::Tensor embedding_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
    return wrapper_CompositeImplicitAutograd__embedding_sparse_backward(grad, indices, num_weights, padding_idx, scale_grad_by_freq);
}
::std::tuple<at::Tensor,at::Tensor> _rowwise_prune(const at::Tensor & weight, const at::Tensor & mask, at::ScalarType compressed_indices_dtype) {
    return wrapper_CompositeImplicitAutograd___rowwise_prune(weight, mask, compressed_indices_dtype);
}
at::Tensor row_stack(at::TensorList tensors) {
    return wrapper_CompositeImplicitAutograd__row_stack(tensors);
}
at::Tensor & row_stack_out(at::Tensor & out, at::TensorList tensors) {
    return wrapper_CompositeImplicitAutograd_out_row_stack_out(tensors, out);
}
at::Tensor & row_stack_outf(at::TensorList tensors, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_row_stack_out(tensors, out);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> embedding_bag(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset) {
    return wrapper_CompositeImplicitAutograd__embedding_bag(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> embedding_bag(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, c10::optional<int64_t> padding_idx) {
    return wrapper_CompositeImplicitAutograd_padding_idx_embedding_bag(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
}
at::Tensor _embedding_bag_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
    return wrapper_CompositeImplicitAutograd___embedding_bag_backward(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx);
}
at::Tensor _embedding_bag_backward_symint(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
    return wrapper_CompositeImplicitAutograd___embedding_bag_backward(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx);
}
at::Tensor _embedding_bag_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
    return wrapper_CompositeImplicitAutograd___embedding_bag_sparse_backward(grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
}
at::Tensor _embedding_bag_sparse_backward_symint(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
    return wrapper_CompositeImplicitAutograd___embedding_bag_sparse_backward(grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
}
at::Tensor & empty_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
    return wrapper_CompositeImplicitAutograd_out_empty_out(c10::fromIntArrayRefSlow(size), memory_format, out);
}
at::Tensor & empty_outf(at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_empty_out(c10::fromIntArrayRefSlow(size), memory_format, out);
}
at::Tensor & empty_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
    return wrapper_CompositeImplicitAutograd_out_empty_out(size, memory_format, out);
}
at::Tensor & empty_symint_outf(c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_empty_out(size, memory_format, out);
}
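// NOTE: the `_symint_out(f)` entries take c10::SymIntArrayRef sizes directly,
// while the plain IntArrayRef entries first widen through
// c10::fromIntArrayRefSlow; both funnel into one wrapper, so concrete and
// symbolic shapes share a single registration. Sketch (hypothetical):
//   at::Tensor out = at::empty({0});
//   at::empty_out(out, {2, 3}, c10::nullopt);  // resizes out to 2x3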
at::Tensor expand_as(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd__expand_as(self, other);
}
at::Tensor flatten(const at::Tensor & self, int64_t start_dim, int64_t end_dim) {
    return wrapper_CompositeImplicitAutograd_using_ints_flatten(self, start_dim, end_dim);
}
at::Tensor flatten(const at::Tensor & self, int64_t start_dim, int64_t end_dim, at::Dimname out_dim) {
    return wrapper_CompositeImplicitAutograd_named_out_dim_flatten(self, start_dim, end_dim, out_dim);
}
at::Tensor flatten(const at::Tensor & self, at::Dimname start_dim, at::Dimname end_dim, at::Dimname out_dim) {
    return wrapper_CompositeImplicitAutograd_using_names_flatten(self, start_dim, end_dim, out_dim);
}
at::Tensor flatten(const at::Tensor & self, at::DimnameList dims, at::Dimname out_dim) {
    return wrapper_CompositeImplicitAutograd_DimnameList_flatten(self, dims, out_dim);
}
at::Tensor unflatten(const at::Tensor & self, int64_t dim, at::IntArrayRef sizes) {
    return wrapper_CompositeImplicitAutograd_int_unflatten(self, dim, sizes);
}
at::Tensor unflatten(const at::Tensor & self, at::Dimname dim, at::IntArrayRef sizes, at::DimnameList names) {
    return wrapper_CompositeImplicitAutograd_Dimname_unflatten(self, dim, sizes, names);
}
at::Tensor floor_divide(const at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_floor_divide(self, other);
}
at::Tensor & floor_divide_(at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_floor_divide_(self, other);
}
at::Tensor grid_sampler(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
    return wrapper_CompositeImplicitAutograd__grid_sampler(input, grid, interpolation_mode, padding_mode, align_corners);
}
::std::tuple<at::Tensor,at::Tensor> _grid_sampler_2d_cpu_fallback_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
    return wrapper_CompositeImplicitAutograd___grid_sampler_2d_cpu_fallback_backward(grad_output, input, grid, interpolation_mode, padding_mode, align_corners);
}
at::Tensor hinge_embedding_loss(const at::Tensor & self, const at::Tensor & target, double margin, int64_t reduction) {
    return wrapper_CompositeImplicitAutograd__hinge_embedding_loss(self, target, margin, reduction);
}
at::Tensor group_norm(const at::Tensor & input, int64_t num_groups, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, bool cudnn_enabled) {
    return wrapper_CompositeImplicitAutograd__group_norm(input, num_groups, weight, bias, eps, cudnn_enabled);
}
int64_t _cufft_get_plan_cache_size(int64_t device_index) {
    return wrapper_CompositeImplicitAutograd___cufft_get_plan_cache_size(device_index);
}
int64_t _cufft_get_plan_cache_max_size(int64_t device_index) {
    return wrapper_CompositeImplicitAutograd___cufft_get_plan_cache_max_size(device_index);
}
void _cufft_set_plan_cache_max_size(int64_t device_index, int64_t max_size) {
    return wrapper_CompositeImplicitAutograd___cufft_set_plan_cache_max_size(device_index, max_size);
}
void _cufft_clear_plan_cache(int64_t device_index) {
    return wrapper_CompositeImplicitAutograd___cufft_clear_plan_cache(device_index);
}
at::Tensor & index_copy_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) {
    return wrapper_CompositeImplicitAutograd_dimname_index_copy_(self, dim, index, source);
}
at::Tensor index_copy(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) {
    return wrapper_CompositeImplicitAutograd_dimname_index_copy(self, dim, index, source);
}
at::Tensor instance_norm(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) {
    return wrapper_CompositeImplicitAutograd__instance_norm(input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled);
}
at::Tensor isclose(const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan) {
    return wrapper_CompositeImplicitAutograd__isclose(self, other, rtol, atol, equal_nan);
}
bool is_distributed(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__is_distributed(self);
}
bool is_floating_point(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__is_floating_point(self);
}
bool is_complex(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__is_complex(self);
}
bool is_conj(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__is_conj(self);
}
bool _is_zerotensor(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd___is_zerotensor(self);
}
bool is_neg(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__is_neg(self);
}
at::Tensor isreal(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__isreal(self);
}
bool is_nonzero(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__is_nonzero(self);
}
bool is_signed(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__is_signed(self);
}
bool is_inference(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__is_inference(self);
}
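// NOTE: most of the bool-returning entries above (is_floating_point,
// is_complex, is_conj, is_neg, is_signed, is_inference, ...) are metadata
// queries answered from the tensor's dtype and flags, which is why one
// CompositeImplicitAutograd registration covers every backend; is_nonzero is
// the exception in that it must read its single element's value.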
at::Tensor kl_div(const at::Tensor & self, const at::Tensor & target, int64_t reduction, bool log_target) {
    return wrapper_CompositeImplicitAutograd__kl_div(self, target, reduction, log_target);
}
at::Tensor kron(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd__kron(self, other);
}
at::Tensor & kron_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_out_kron_out(self, other, out);
}
at::Tensor & kron_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_kron_out(self, other, out);
}
::std::tuple<at::Tensor,at::Tensor> kthvalue(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_dimname_kthvalue(self, k, dim, keepdim);
}
::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_dimname_out_kthvalue_out(self, k, dim, keepdim, values, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> kthvalue_outf(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
    return wrapper_CompositeImplicitAutograd_dimname_out_kthvalue_out(self, k, dim, keepdim, values, indices);
}
at::Tensor layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, bool cudnn_enable) {
    return wrapper_CompositeImplicitAutograd__layer_norm(input, c10::fromIntArrayRefSlow(normalized_shape), weight, bias, eps, cudnn_enable);
}
at::Tensor layer_norm_symint(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, bool cudnn_enable) {
    return wrapper_CompositeImplicitAutograd__layer_norm(input, normalized_shape, weight, bias, eps, cudnn_enable);
}
at::Tensor linear(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias) {
    return wrapper_CompositeImplicitAutograd__linear(input, weight, bias);
}
at::Tensor fbgemm_linear_int8_weight_fp32_activation(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) {
    return wrapper_CompositeImplicitAutograd__fbgemm_linear_int8_weight_fp32_activation(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
}
at::Tensor fbgemm_linear_int8_weight(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) {
    return wrapper_CompositeImplicitAutograd__fbgemm_linear_int8_weight(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
}
::std::tuple<at::Tensor,at::Tensor,double,int64_t> fbgemm_linear_quantize_weight(const at::Tensor & input) {
    return wrapper_CompositeImplicitAutograd__fbgemm_linear_quantize_weight(input);
}
at::Tensor fbgemm_pack_gemm_matrix_fp16(const at::Tensor & input) {
    return wrapper_CompositeImplicitAutograd__fbgemm_pack_gemm_matrix_fp16(input);
}
at::Tensor fbgemm_linear_fp16_weight_fp32_activation(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) {
    return wrapper_CompositeImplicitAutograd__fbgemm_linear_fp16_weight_fp32_activation(input, packed_weight, bias);
}
at::Tensor fbgemm_linear_fp16_weight(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) {
    return wrapper_CompositeImplicitAutograd__fbgemm_linear_fp16_weight(input, packed_weight, bias);
}
at::Tensor fbgemm_pack_quantized_matrix(const at::Tensor & input) {
    return wrapper_CompositeImplicitAutograd__fbgemm_pack_quantized_matrix(input);
}
at::Tensor fbgemm_pack_quantized_matrix(const at::Tensor & input, int64_t K, int64_t N) {
    return wrapper_CompositeImplicitAutograd_KN_fbgemm_pack_quantized_matrix(input, K, N);
}
at::Tensor ldexp(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor_ldexp(self, other);
}
at::Tensor & ldexp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_out_ldexp_out(self, other, out);
}
at::Tensor & ldexp_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_ldexp_out(self, other, out);
}
at::Tensor & ldexp_(at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd__ldexp_(self, other);
}
at::Tensor log_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_int_log_softmax(self, dim, dtype);
}
at::Tensor log_softmax(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_Dimname_log_softmax(self, dim, dtype);
}
at::Tensor logcumsumexp(const at::Tensor & self, at::Dimname dim) {
    return wrapper_CompositeImplicitAutograd_dimname_logcumsumexp(self, dim);
}
at::Tensor & logcumsumexp_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim) {
    return wrapper_CompositeImplicitAutograd_dimname_out_logcumsumexp_out(self, dim, out);
}
at::Tensor & logcumsumexp_outf(const at::Tensor & self, at::Dimname dim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_dimname_out_logcumsumexp_out(self, dim, out);
}
at::Tensor logsumexp(const at::Tensor & self, at::DimnameList dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_names_logsumexp(self, dim, keepdim);
}
at::Tensor & logsumexp_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_names_out_logsumexp_out(self, dim, keepdim, out);
}
at::Tensor & logsumexp_outf(const at::Tensor & self, at::DimnameList dim, bool keepdim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_names_out_logsumexp_out(self, dim, keepdim, out);
}
at::Tensor margin_ranking_loss(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) {
    return wrapper_CompositeImplicitAutograd__margin_ranking_loss(input1, input2, target, margin, reduction);
}
at::Tensor matmul(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd__matmul(self, other);
}
at::Tensor & matmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_out_matmul_out(self, other, out);
}
at::Tensor & matmul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_matmul_out(self, other, out);
}
at::Tensor matrix_power(const at::Tensor & self, int64_t n) {
    return wrapper_CompositeImplicitAutograd__matrix_power(self, n);
}
at::Tensor & matrix_power_out(at::Tensor & out, const at::Tensor & self, int64_t n) {
    return wrapper_CompositeImplicitAutograd_out_matrix_power_out(self, n, out);
}
at::Tensor & matrix_power_outf(const at::Tensor & self, int64_t n, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_matrix_power_out(self, n, out);
}
at::Tensor matrix_exp(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__matrix_exp(self);
}
at::Tensor matrix_exp_backward(const at::Tensor & self, const at::Tensor & grad) {
    return wrapper_CompositeImplicitAutograd__matrix_exp_backward(self, grad);
}
::std::tuple<at::Tensor,at::Tensor> max(const at::Tensor & self, at::Dimname dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_names_dim_max(self, dim, keepdim);
}
::std::tuple<at::Tensor &,at::Tensor &> max_out(at::Tensor & max, at::Tensor & max_values, const at::Tensor & self, at::Dimname dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_names_dim_max_max_out(self, dim, keepdim, max, max_values);
}
::std::tuple<at::Tensor &,at::Tensor &> max_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) {
    return wrapper_CompositeImplicitAutograd_names_dim_max_max_out(self, dim, keepdim, max, max_values);
}
at::Tensor value_selecting_reduction_backward(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, at::IntArrayRef sizes, bool keepdim) {
    return wrapper_CompositeImplicitAutograd__value_selecting_reduction_backward(grad, dim, indices, c10::fromIntArrayRefSlow(sizes), keepdim);
}
at::Tensor value_selecting_reduction_backward_symint(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, c10::SymIntArrayRef sizes, bool keepdim) {
    return wrapper_CompositeImplicitAutograd__value_selecting_reduction_backward(grad, dim, indices, sizes, keepdim);
}
::std::tuple<at::Tensor,at::Tensor> max_pool1d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    return wrapper_CompositeImplicitAutograd__max_pool1d_with_indices(self, kernel_size, stride, padding, dilation, ceil_mode);
}
at::Tensor max_pool1d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    return wrapper_CompositeImplicitAutograd__max_pool1d(self, kernel_size, stride, padding, dilation, ceil_mode);
}
at::Tensor max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    return wrapper_CompositeImplicitAutograd__max_pool2d(self, kernel_size, stride, padding, dilation, ceil_mode);
}
at::Tensor max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    return wrapper_CompositeImplicitAutograd__max_pool3d(self, kernel_size, stride, padding, dilation, ceil_mode);
}
at::Tensor mean(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_names_dim_mean(self, dim, keepdim, dtype);
}
at::Tensor & mean_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_names_out_mean_out(self, dim, keepdim, dtype, out);
}
at::Tensor & mean_outf(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_names_out_mean_out(self, dim, keepdim, dtype, out);
}
at::Tensor nanmean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd__nanmean(self, dim, keepdim, dtype);
}
at::Tensor & nanmean_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_out_nanmean_out(self, dim, keepdim, dtype, out);
}
at::Tensor & nanmean_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_nanmean_out(self, dim, keepdim, dtype, out);
}
::std::tuple<at::Tensor,at::Tensor> median(const at::Tensor & self, at::Dimname dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_names_dim_median(self, dim, keepdim);
}
::std::tuple<at::Tensor &,at::Tensor &> median_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_names_dim_values_median_out(self, dim, keepdim, values, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> median_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
    return wrapper_CompositeImplicitAutograd_names_dim_values_median_out(self, dim, keepdim, values, indices);
}
::std::tuple<at::Tensor,at::Tensor> nanmedian(const at::Tensor & self, at::Dimname dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_names_dim_nanmedian(self, dim, keepdim);
}
::std::tuple<at::Tensor &,at::Tensor &> nanmedian_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_names_dim_values_nanmedian_out(self, dim, keepdim, values, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> nanmedian_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
    return wrapper_CompositeImplicitAutograd_names_dim_values_nanmedian_out(self, dim, keepdim, values, indices);
}
::std::tuple<at::Tensor,at::Tensor> min(const at::Tensor & self, at::Dimname dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_names_dim_min(self, dim, keepdim);
}
::std::tuple<at::Tensor &,at::Tensor &> min_out(at::Tensor & min, at::Tensor & min_indices, const at::Tensor & self, at::Dimname dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_names_dim_min_min_out(self, dim, keepdim, min, min_indices);
}
::std::tuple<at::Tensor &,at::Tensor &> min_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices) {
    return wrapper_CompositeImplicitAutograd_names_dim_min_min_out(self, dim, keepdim, min, min_indices);
}
at::Tensor _sparse_mm(const at::Tensor & sparse, const at::Tensor & dense) {
    return wrapper_CompositeImplicitAutograd___sparse_mm(sparse, dense);
}
at::Tensor _sparse_mm(const at::Tensor & sparse, const at::Tensor & dense, c10::string_view reduce) {
    return wrapper_CompositeImplicitAutograd_reduce__sparse_mm(sparse, dense, reduce);
}
::std::tuple<at::Tensor,at::Tensor> mode(const at::Tensor & self, at::Dimname dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_dimname_mode(self, dim, keepdim);
}
::std::tuple<at::Tensor &,at::Tensor &> mode_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_dimname_out_mode_out(self, dim, keepdim, values, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> mode_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
    return wrapper_CompositeImplicitAutograd_dimname_out_mode_out(self, dim, keepdim, values, indices);
}
at::Tensor multiply(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor_multiply(self, other);
}
at::Tensor & multiply_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_out_multiply_out(self, other, out);
}
at::Tensor & multiply_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_multiply_out(self, other, out);
}
at::Tensor & multiply_(at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor_multiply_(self, other);
}
at::Tensor multiply(const at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_multiply(self, other);
}
at::Tensor & multiply_(at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_multiply_(self, other);
}
at::Tensor narrow(const at::Tensor & self, int64_t dim, int64_t start, int64_t length) {
    return wrapper_CompositeImplicitAutograd__narrow(self, dim, start, length);
}
at::Tensor narrow_symint(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
    return wrapper_CompositeImplicitAutograd__narrow(self, dim, start, length);
}
at::Tensor narrow(const at::Tensor & self, int64_t dim, const at::Tensor & start, int64_t length) {
    return wrapper_CompositeImplicitAutograd_Tensor_narrow(self, dim, start, length);
}
at::Tensor narrow_symint(const at::Tensor & self, int64_t dim, const at::Tensor & start, c10::SymInt length) {
    return wrapper_CompositeImplicitAutograd_Tensor_narrow(self, dim, start, length);
}
bool is_vulkan_available() {
    return wrapper_CompositeImplicitAutograd__is_vulkan_available();
}
bool _nnpack_available() {
    return wrapper_CompositeImplicitAutograd___nnpack_available();
}
at::Tensor pairwise_distance(const at::Tensor & x1, const at::Tensor & x2, double p, double eps, bool keepdim) {
    return wrapper_CompositeImplicitAutograd__pairwise_distance(x1, x2, p, eps, keepdim);
}
at::Tensor cdist(const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode) {
    return wrapper_CompositeImplicitAutograd__cdist(x1, x2, p, compute_mode);
}
at::Tensor pdist(const at::Tensor & self, double p) {
    return wrapper_CompositeImplicitAutograd__pdist(self, p);
}
at::Tensor cosine_similarity(const at::Tensor & x1, const at::Tensor & x2, int64_t dim, double eps) {
    return wrapper_CompositeImplicitAutograd__cosine_similarity(x1, x2, dim, eps);
}
at::Tensor movedim(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) {
    return wrapper_CompositeImplicitAutograd_intlist_movedim(self, source, destination);
}
at::Tensor movedim(const at::Tensor & self, int64_t source, int64_t destination) {
    return wrapper_CompositeImplicitAutograd_int_movedim(self, source, destination);
}
at::Tensor moveaxis(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) {
    return wrapper_CompositeImplicitAutograd_intlist_moveaxis(self, source, destination);
}
at::Tensor moveaxis(const at::Tensor & self, int64_t source, int64_t destination) {
    return wrapper_CompositeImplicitAutograd_int_moveaxis(self, source, destination);
}
at::Tensor numpy_T(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__numpy_T(self);
}
at::Tensor matrix_H(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__matrix_H(self);
}
at::Tensor mT(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__mT(self);
}
at::Tensor mH(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__mH(self);
}
at::Tensor adjoint(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__adjoint(self);
}
at::Tensor native_channel_shuffle(const at::Tensor & self, int64_t groups) {
    return wrapper_CompositeImplicitAutograd__native_channel_shuffle(self, groups);
}
at::Tensor pin_memory(const at::Tensor & self, c10::optional<at::Device> device) {
    return wrapper_CompositeImplicitAutograd__pin_memory(self, device);
}
at::Tensor pinverse(const at::Tensor & self, double rcond) {
    return wrapper_CompositeImplicitAutograd__pinverse(self, rcond);
}
at::Tensor poisson_nll_loss(const at::Tensor & input, const at::Tensor & target, bool log_input, bool full, double eps, int64_t reduction) {
    return wrapper_CompositeImplicitAutograd__poisson_nll_loss(input, target, log_input, full, eps, reduction);
}
at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::Generator> generator) {
    return wrapper_CompositeImplicitAutograd_generator_out_rand_out(c10::fromIntArrayRefSlow(size), generator, out);
}
at::Tensor & rand_outf(at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_generator_out_rand_out(c10::fromIntArrayRefSlow(size), generator, out);
}
at::Tensor & rand_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::Generator> generator) {
    return wrapper_CompositeImplicitAutograd_generator_out_rand_out(size, generator, out);
}
at::Tensor & rand_symint_outf(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_generator_out_rand_out(size, generator, out);
}
at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size) {
    return wrapper_CompositeImplicitAutograd_out_randn_out(c10::fromIntArrayRefSlow(size), out);
}
at::Tensor & randn_outf(at::IntArrayRef size, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_randn_out(c10::fromIntArrayRefSlow(size), out);
}
at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size) {
    return wrapper_CompositeImplicitAutograd_out_randn_out(size, out);
}
at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_randn_out(size, out);
}
at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::Generator> generator) {
    return wrapper_CompositeImplicitAutograd_generator_out_randn_out(c10::fromIntArrayRefSlow(size), generator, out);
}
at::Tensor & randn_outf(at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_generator_out_randn_out(c10::fromIntArrayRefSlow(size), generator, out);
}
at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::Generator> generator) {
    return wrapper_CompositeImplicitAutograd_generator_out_randn_out(size, generator, out);
}
at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_generator_out_randn_out(size, generator, out);
}
at::Tensor ravel(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__ravel(self);
}
at::Tensor negative(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__negative(self);
}
at::Tensor & negative_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd_out_negative_out(self, out);
}
at::Tensor & negative_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_negative_out(self, out);
}
at::Tensor & negative_(at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__negative_(self);
}
at::Tensor repeat_interleave(const at::Tensor & self, const at::Tensor & repeats, c10::optional<int64_t> dim, c10::optional<int64_t> output_size) {
    return wrapper_CompositeImplicitAutograd_self_Tensor_repeat_interleave(self, repeats, dim, output_size);
}
at::Tensor repeat_interleave(const at::Tensor & self, int64_t repeats, c10::optional<int64_t> dim, c10::optional<int64_t> output_size) {
    return wrapper_CompositeImplicitAutograd_self_int_repeat_interleave(self, repeats, dim, output_size);
}
at::Tensor repeat_interleave_symint(const at::Tensor & self, c10::SymInt repeats, c10::optional<int64_t> dim, c10::optional<int64_t> output_size) {
    return wrapper_CompositeImplicitAutograd_self_int_repeat_interleave(self, repeats, dim, output_size);
}
at::Tensor reshape(const at::Tensor & self, at::IntArrayRef shape) {
    return wrapper_CompositeImplicitAutograd__reshape(self, c10::fromIntArrayRefSlow(shape));
}
at::Tensor reshape_symint(const at::Tensor & self, c10::SymIntArrayRef shape) {
    return wrapper_CompositeImplicitAutograd__reshape(self, shape);
}
at::Tensor reshape_as(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd__reshape_as(self, other);
}
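// NOTE: reshape widens its IntArrayRef shape through c10::fromIntArrayRefSlow
// into the same wrapper as reshape_symint, so eager calls with concrete sizes
// and traced calls with symbolic sizes take one path. Sketch (hypothetical):
//   at::Tensor t = at::arange(6);
//   at::Tensor m = at::reshape(t, {2, 3});  // a view when strides allow, else a copy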
at::Tensor rrelu(const at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {
    return wrapper_CompositeImplicitAutograd__rrelu(self, lower, upper, training, generator);
}
at::Tensor & rrelu_(at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {
    return wrapper_CompositeImplicitAutograd__rrelu_(self, lower, upper, training, generator);
}
at::Tensor relu6(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__relu6(self);
}
at::Tensor & relu6_(at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__relu6_(self);
}
at::Tensor prelu(const at::Tensor & self, const at::Tensor & weight) {
    return wrapper_CompositeImplicitAutograd__prelu(self, weight);
}
at::Tensor infinitely_differentiable_gelu_backward(const at::Tensor & grad, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__infinitely_differentiable_gelu_backward(grad, self);
}
at::Tensor select(const at::Tensor & self, at::Dimname dim, int64_t index) {
    return wrapper_CompositeImplicitAutograd_Dimname_select(self, dim, index);
}
at::Tensor selu(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__selu(self);
}
at::Tensor & selu_(at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__selu_(self);
}
at::Tensor silu_backward(const at::Tensor & grad_output, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__silu_backward(grad_output, self);
}
at::Tensor mish_backward(const at::Tensor & grad_output, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__mish_backward(grad_output, self);
}
int64_t size(const at::Tensor & self, int64_t dim) {
    return wrapper_CompositeImplicitAutograd_int_size(self, dim);
}
int64_t size(const at::Tensor & self, at::Dimname dim) {
    return wrapper_CompositeImplicitAutograd_Dimname_size(self, dim);
}
at::Tensor smm(const at::Tensor & self, const at::Tensor & mat2) {
    return wrapper_CompositeImplicitAutograd__smm(self, mat2);
}
at::Tensor softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_int_softmax(self, dim, dtype);
}
at::Tensor softmax(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_Dimname_softmax(self, dim, dtype);
}
::std::vector<at::Tensor> split(const at::Tensor & self, at::IntArrayRef split_size, int64_t dim) {
    return wrapper_CompositeImplicitAutograd_sizes_split(self, c10::fromIntArrayRefSlow(split_size), dim);
}
::std::vector<at::Tensor> split_symint(const at::Tensor & self, c10::SymIntArrayRef split_size, int64_t dim) {
    return wrapper_CompositeImplicitAutograd_sizes_split(self, split_size, dim);
}
::std::vector<at::Tensor> hsplit(const at::Tensor & self, int64_t sections) {
    return wrapper_CompositeImplicitAutograd_int_hsplit(self, sections);
}
::std::vector<at::Tensor> hsplit(const at::Tensor & self, at::IntArrayRef indices) {
    return wrapper_CompositeImplicitAutograd_array_hsplit(self, indices);
}
::std::vector<at::Tensor> vsplit(const at::Tensor & self, int64_t sections) {
    return wrapper_CompositeImplicitAutograd_int_vsplit(self, sections);
}
::std::vector<at::Tensor> vsplit(const at::Tensor & self, at::IntArrayRef indices) {
    return wrapper_CompositeImplicitAutograd_array_vsplit(self, indices);
}
::std::vector<at::Tensor> dsplit(const at::Tensor & self, int64_t sections) {
    return wrapper_CompositeImplicitAutograd_int_dsplit(self, sections);
}
::std::vector<at::Tensor> dsplit(const at::Tensor & self, at::IntArrayRef indices) {
    return wrapper_CompositeImplicitAutograd_array_dsplit(self, indices);
}
10084at::Tensor squeeze(const at::Tensor & self, at::Dimname dim) {
10085return wrapper_CompositeImplicitAutograd_dimname_squeeze(self, dim);
10086}
10087at::Tensor & squeeze_(at::Tensor & self, at::Dimname dim) {
10088return wrapper_CompositeImplicitAutograd_dimname_squeeze_(self, dim);
10089}
10090at::Tensor sspaddmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
10091return wrapper_CompositeImplicitAutograd__sspaddmm(self, mat1, mat2, beta, alpha);
10092}
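// NOTE: hstack/vstack/dstack each appear in three forms that share a single
// wrapper: functional, out-argument-first (_out), and out-argument-last (_outf).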
at::Tensor hstack(at::TensorList tensors) {
    return wrapper_CompositeImplicitAutograd__hstack(tensors);
}
at::Tensor & hstack_out(at::Tensor & out, at::TensorList tensors) {
    return wrapper_CompositeImplicitAutograd_out_hstack_out(tensors, out);
}
at::Tensor & hstack_outf(at::TensorList tensors, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_hstack_out(tensors, out);
}
at::Tensor vstack(at::TensorList tensors) {
    return wrapper_CompositeImplicitAutograd__vstack(tensors);
}
at::Tensor & vstack_out(at::Tensor & out, at::TensorList tensors) {
    return wrapper_CompositeImplicitAutograd_out_vstack_out(tensors, out);
}
at::Tensor & vstack_outf(at::TensorList tensors, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_vstack_out(tensors, out);
}
at::Tensor dstack(at::TensorList tensors) {
    return wrapper_CompositeImplicitAutograd__dstack(tensors);
}
at::Tensor & dstack_out(at::Tensor & out, at::TensorList tensors) {
    return wrapper_CompositeImplicitAutograd_out_dstack_out(tensors, out);
}
at::Tensor & dstack_outf(at::TensorList tensors, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_dstack_out(tensors, out);
}
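// NOTE: stft is registered twice, once without and once with the
// center/pad_mode arguments; istft computes the inverse transform.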
at::Tensor stft(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool normalized, c10::optional<bool> onesided, c10::optional<bool> return_complex) {
    return wrapper_CompositeImplicitAutograd__stft(self, n_fft, hop_length, win_length, window, normalized, onesided, return_complex);
}
at::Tensor stft(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool center, c10::string_view pad_mode, bool normalized, c10::optional<bool> onesided, c10::optional<bool> return_complex) {
    return wrapper_CompositeImplicitAutograd_center_stft(self, n_fft, hop_length, win_length, window, center, pad_mode, normalized, onesided, return_complex);
}
at::Tensor istft(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool center, bool normalized, c10::optional<bool> onesided, c10::optional<int64_t> length, bool return_complex) {
    return wrapper_CompositeImplicitAutograd__istft(self, n_fft, hop_length, win_length, window, center, normalized, onesided, length, return_complex);
}
int64_t stride(const at::Tensor & self, int64_t dim) {
    return wrapper_CompositeImplicitAutograd_int_stride(self, dim);
}
int64_t stride(const at::Tensor & self, at::Dimname dim) {
    return wrapper_CompositeImplicitAutograd_Dimname_stride(self, dim);
}
at::Tensor sum(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_dim_DimnameList_sum(self, dim, keepdim, dtype);
}
at::Tensor & sum_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_DimnameList_out_sum_out(self, dim, keepdim, dtype, out);
}
at::Tensor & sum_outf(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_DimnameList_out_sum_out(self, dim, keepdim, dtype, out);
}
at::Tensor sum_to_size(const at::Tensor & self, at::IntArrayRef size) {
    return wrapper_CompositeImplicitAutograd__sum_to_size(self, size);
}
at::Tensor square(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__square(self);
}
at::Tensor & square_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd_out_square_out(self, out);
}
at::Tensor & square_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_square_out(self, out);
}
at::Tensor & square_(at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__square_(self);
}
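// NOTE: In the std/var family below, the `unbiased` bool overloads parallel
// the `correction` overloads (unbiased=true corresponds to correction=1);
// std_mean/var_mean return the statistic and the mean as a single tuple.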
at::Tensor std(const at::Tensor & self, bool unbiased) {
    return wrapper_CompositeImplicitAutograd__std(self, unbiased);
}
at::Tensor std(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_dim_std(self, dim, unbiased, keepdim);
}
at::Tensor & std_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_out_std_out(self, dim, unbiased, keepdim, out);
}
at::Tensor & std_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_std_out(self, dim, unbiased, keepdim, out);
}
::std::tuple<at::Tensor,at::Tensor> std_mean(const at::Tensor & self, bool unbiased) {
    return wrapper_CompositeImplicitAutograd__std_mean(self, unbiased);
}
::std::tuple<at::Tensor,at::Tensor> std_mean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_dim_std_mean(self, dim, unbiased, keepdim);
}
::std::tuple<at::Tensor,at::Tensor> std_mean(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_names_dim_std_mean(self, dim, unbiased, keepdim);
}
::std::tuple<at::Tensor,at::Tensor> std_mean(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_correction_names_std_mean(self, dim, correction, keepdim);
}
at::Tensor std(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_names_dim_std(self, dim, unbiased, keepdim);
}
at::Tensor & std_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_names_out_std_out(self, dim, unbiased, keepdim, out);
}
at::Tensor & std_outf(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_names_out_std_out(self, dim, unbiased, keepdim, out);
}
at::Tensor std(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_correction_names_std(self, dim, correction, keepdim);
}
at::Tensor & std_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_correction_names_out_std_out(self, dim, correction, keepdim, out);
}
at::Tensor & std_outf(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_correction_names_out_std_out(self, dim, correction, keepdim, out);
}
at::Tensor prod(const at::Tensor & self, at::Dimname dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_dim_Dimname_prod(self, dim, keepdim, dtype);
}
at::Tensor & prod_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_Dimname_out_prod_out(self, dim, keepdim, dtype, out);
}
at::Tensor & prod_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_Dimname_out_prod_out(self, dim, keepdim, dtype, out);
}
at::Tensor tensordot(const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other) {
    return wrapper_CompositeImplicitAutograd__tensordot(self, other, dims_self, dims_other);
}
at::Tensor tile(const at::Tensor & self, at::IntArrayRef dims) {
    return wrapper_CompositeImplicitAutograd__tile(self, dims);
}
at::Tensor transpose(const at::Tensor & self, at::Dimname dim0, at::Dimname dim1) {
    return wrapper_CompositeImplicitAutograd_Dimname_transpose(self, dim0, dim1);
}
at::Tensor one_hot(const at::Tensor & self, int64_t num_classes) {
    return wrapper_CompositeImplicitAutograd__one_hot(self, num_classes);
}
at::Tensor fliplr(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__fliplr(self);
}
at::Tensor flipud(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__flipud(self);
}
at::Tensor trapezoid(const at::Tensor & y, const at::Tensor & x, int64_t dim) {
    return wrapper_CompositeImplicitAutograd_x_trapezoid(y, x, dim);
}
at::Tensor trapezoid(const at::Tensor & y, const at::Scalar & dx, int64_t dim) {
    return wrapper_CompositeImplicitAutograd_dx_trapezoid(y, dx, dim);
}
at::Tensor trapz(const at::Tensor & y, const at::Tensor & x, int64_t dim) {
    return wrapper_CompositeImplicitAutograd_x_trapz(y, x, dim);
}
at::Tensor trapz(const at::Tensor & y, double dx, int64_t dim) {
    return wrapper_CompositeImplicitAutograd_dx_trapz(y, dx, dim);
}
at::Tensor triplet_margin_loss(const at::Tensor & anchor, const at::Tensor & positive, const at::Tensor & negative, double margin, double p, double eps, bool swap, int64_t reduction) {
    return wrapper_CompositeImplicitAutograd__triplet_margin_loss(anchor, positive, negative, margin, p, eps, swap, reduction);
}
at::Tensor fix(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__fix(self);
}
at::Tensor & fix_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd_out_fix_out(self, out);
}
at::Tensor & fix_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_fix_out(self, out);
}
at::Tensor & fix_(at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__fix_(self);
}
at::Tensor type_as(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd__type_as(self, other);
}
bool _has_compatible_shallow_copy_type(const at::Tensor & self, const at::Tensor & from) {
    return wrapper_CompositeImplicitAutograd___has_compatible_shallow_copy_type(self, from);
}
at::Tensor vander(const at::Tensor & x, c10::optional<int64_t> N, bool increasing) {
    return wrapper_CompositeImplicitAutograd__vander(x, N, increasing);
}
at::Tensor var(const at::Tensor & self, bool unbiased) {
    return wrapper_CompositeImplicitAutograd__var(self, unbiased);
}
at::Tensor var(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_dim_var(self, dim, unbiased, keepdim);
}
at::Tensor & var_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_out_var_out(self, dim, unbiased, keepdim, out);
}
at::Tensor & var_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_var_out(self, dim, unbiased, keepdim, out);
}
at::Tensor var(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_names_dim_var(self, dim, unbiased, keepdim);
}
at::Tensor & var_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_names_out_var_out(self, dim, unbiased, keepdim, out);
}
at::Tensor & var_outf(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_names_out_var_out(self, dim, unbiased, keepdim, out);
}
at::Tensor var(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_correction_names_var(self, dim, correction, keepdim);
}
at::Tensor & var_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_correction_names_out_var_out(self, dim, correction, keepdim, out);
}
at::Tensor & var_outf(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_correction_names_out_var_out(self, dim, correction, keepdim, out);
}
::std::tuple<at::Tensor,at::Tensor> var_mean(const at::Tensor & self, bool unbiased) {
    return wrapper_CompositeImplicitAutograd__var_mean(self, unbiased);
}
::std::tuple<at::Tensor,at::Tensor> var_mean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_dim_var_mean(self, dim, unbiased, keepdim);
}
::std::tuple<at::Tensor,at::Tensor> var_mean(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_names_dim_var_mean(self, dim, unbiased, keepdim);
}
::std::tuple<at::Tensor,at::Tensor> var_mean(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_correction_names_var_mean(self, dim, correction, keepdim);
}
at::Tensor view_as(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd__view_as(self, other);
}
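// NOTE: where() is overloaded for each Scalar/Tensor combination of self and
// other; the single-argument form returns per-dimension index tensors,
// matching nonzero(condition, as_tuple=True) in the Python API.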
at::Tensor where(const at::Tensor & condition, const at::Scalar & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_ScalarSelf_where(condition, self, other);
}
at::Tensor where(const at::Tensor & condition, const at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_ScalarOther_where(condition, self, other);
}
at::Tensor where(const at::Tensor & condition, const at::Scalar & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_where(condition, self, other);
}
::std::vector<at::Tensor> where(const at::Tensor & condition) {
    return wrapper_CompositeImplicitAutograd__where(condition);
}
at::Tensor norm_except_dim(const at::Tensor & v, int64_t pow, int64_t dim) {
    return wrapper_CompositeImplicitAutograd__norm_except_dim(v, pow, dim);
}
at::Tensor _weight_norm(const at::Tensor & v, const at::Tensor & g, int64_t dim) {
    return wrapper_CompositeImplicitAutograd___weight_norm(v, g, dim);
}
::std::tuple<at::Tensor,at::Tensor> _weight_norm_differentiable_backward(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
    return wrapper_CompositeImplicitAutograd___weight_norm_differentiable_backward(grad_w, saved_v, saved_g, saved_norms, dim);
}
at::Tensor _sparse_sum(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd___sparse_sum(self);
}
at::Tensor _sparse_sum(const at::Tensor & self, at::ScalarType dtype) {
    return wrapper_CompositeImplicitAutograd_dtype__sparse_sum(self, dtype);
}
at::Tensor _sparse_sum(const at::Tensor & self, at::IntArrayRef dim, at::ScalarType dtype) {
    return wrapper_CompositeImplicitAutograd_dim_dtype__sparse_sum(self, dim, dtype);
}
at::Tensor _sparse_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_int__sparse_softmax(self, dim, dtype);
}
at::Tensor _sparse_softmax(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_Dimname__sparse_softmax(self, dim, dtype);
}
at::Tensor _sparse_log_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_int__sparse_log_softmax(self, dim, dtype);
}
at::Tensor _sparse_log_softmax(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_Dimname__sparse_log_softmax(self, dim, dtype);
}
at::Tensor norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype) {
    return wrapper_CompositeImplicitAutograd_names_ScalarOpt_dim_dtype_norm(self, p, dim, keepdim, dtype);
}
at::Tensor & norm_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype) {
    return wrapper_CompositeImplicitAutograd_names_dtype_out_norm_out(self, p, dim, keepdim, dtype, out);
}
at::Tensor & norm_outf(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_names_dtype_out_norm_out(self, p, dim, keepdim, dtype, out);
}
at::Tensor norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_names_ScalarOpt_dim_norm(self, p, dim, keepdim);
}
at::Tensor & norm_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_names_out_norm_out(self, p, dim, keepdim, out);
}
at::Tensor & norm_outf(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_names_out_norm_out(self, p, dim, keepdim, out);
}
at::Tensor frobenius_norm(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_dim_frobenius_norm(self, dim, keepdim);
}
at::Tensor & frobenius_norm_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_out_frobenius_norm_out(self, dim, keepdim, out);
}
at::Tensor & frobenius_norm_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_frobenius_norm_out(self, dim, keepdim, out);
}
at::Tensor nuclear_norm(const at::Tensor & self, bool keepdim) {
    return wrapper_CompositeImplicitAutograd__nuclear_norm(self, keepdim);
}
at::Tensor & nuclear_norm_out(at::Tensor & out, const at::Tensor & self, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_out_nuclear_norm_out(self, keepdim, out);
}
at::Tensor & nuclear_norm_outf(const at::Tensor & self, bool keepdim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_nuclear_norm_out(self, keepdim, out);
}
at::Tensor nuclear_norm(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_dim_nuclear_norm(self, dim, keepdim);
}
at::Tensor & nuclear_norm_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_dim_out_nuclear_norm_out(self, dim, keepdim, out);
}
at::Tensor & nuclear_norm_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_dim_out_nuclear_norm_out(self, dim, keepdim, out);
}
at::Tensor positive(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__positive(self);
}
at::Tensor subtract(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    return wrapper_CompositeImplicitAutograd_Tensor_subtract(self, other, alpha);
}
at::Tensor & subtract_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    return wrapper_CompositeImplicitAutograd_out_subtract_out(self, other, alpha, out);
}
at::Tensor & subtract_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_subtract_out(self, other, alpha, out);
}
at::Tensor & subtract_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    return wrapper_CompositeImplicitAutograd_Tensor_subtract_(self, other, alpha);
}
at::Tensor subtract(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    return wrapper_CompositeImplicitAutograd_Scalar_subtract(self, other, alpha);
}
at::Tensor & subtract_(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    return wrapper_CompositeImplicitAutograd_Scalar_subtract_(self, other, alpha);
}
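// NOTE: Each sparse factory below comes in a pair: a TensorOptions
// convenience overload that unpacks into (dtype, layout, device, pin_memory),
// using optTypeMetaToScalarType to translate the TypeMeta held by the
// options, and the fully unpacked overload matching the registered schema.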
at::Tensor sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
    return wrapper_CompositeImplicitAutograd_comp_plain_value_size_sparse_compressed_tensor(compressed_indices, plain_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeImplicitAutograd_comp_plain_value_size_sparse_compressed_tensor(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
}
at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
    return wrapper_CompositeImplicitAutograd_crow_col_value_size_sparse_csr_tensor(crow_indices, col_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeImplicitAutograd_crow_col_value_size_sparse_csr_tensor(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
}
at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
    return wrapper_CompositeImplicitAutograd_ccol_row_value_size_sparse_csc_tensor(ccol_indices, row_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeImplicitAutograd_ccol_row_value_size_sparse_csc_tensor(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
}
at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
    return wrapper_CompositeImplicitAutograd_crow_col_value_size_sparse_bsr_tensor(crow_indices, col_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeImplicitAutograd_crow_col_value_size_sparse_bsr_tensor(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
}
at::Tensor sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
    return wrapper_CompositeImplicitAutograd_ccol_row_value_size_sparse_bsc_tensor(ccol_indices, row_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeImplicitAutograd_ccol_row_value_size_sparse_bsc_tensor(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
}
at::Tensor sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::TensorOptions options) {
    return wrapper_CompositeImplicitAutograd_comp_plain_value_sparse_compressed_tensor(compressed_indices, plain_indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeImplicitAutograd_comp_plain_value_sparse_compressed_tensor(compressed_indices, plain_indices, values, dtype, layout, device, pin_memory);
}
at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::TensorOptions options) {
    return wrapper_CompositeImplicitAutograd_crow_col_value_sparse_csr_tensor(crow_indices, col_indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeImplicitAutograd_crow_col_value_sparse_csr_tensor(crow_indices, col_indices, values, dtype, layout, device, pin_memory);
}
at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::TensorOptions options) {
    return wrapper_CompositeImplicitAutograd_ccol_row_value_sparse_csc_tensor(ccol_indices, row_indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeImplicitAutograd_ccol_row_value_sparse_csc_tensor(ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
}
at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::TensorOptions options) {
    return wrapper_CompositeImplicitAutograd_crow_col_value_sparse_bsr_tensor(crow_indices, col_indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeImplicitAutograd_crow_col_value_sparse_bsr_tensor(crow_indices, col_indices, values, dtype, layout, device, pin_memory);
}
at::Tensor sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::TensorOptions options) {
    return wrapper_CompositeImplicitAutograd_ccol_row_value_sparse_bsc_tensor(ccol_indices, row_indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeImplicitAutograd_ccol_row_value_sparse_bsc_tensor(ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
}
at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
    return wrapper_CompositeImplicitAutograd___sparse_compressed_tensor_unsafe(compressed_indices, plain_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeImplicitAutograd___sparse_compressed_tensor_unsafe(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
}
at::Tensor _sparse_csr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
    return wrapper_CompositeImplicitAutograd___sparse_csr_tensor_unsafe(crow_indices, col_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor _sparse_csr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeImplicitAutograd___sparse_csr_tensor_unsafe(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
}
at::Tensor _sparse_csc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
    return wrapper_CompositeImplicitAutograd___sparse_csc_tensor_unsafe(ccol_indices, row_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor _sparse_csc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeImplicitAutograd___sparse_csc_tensor_unsafe(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
}
at::Tensor _sparse_bsr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
    return wrapper_CompositeImplicitAutograd___sparse_bsr_tensor_unsafe(crow_indices, col_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor _sparse_bsr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeImplicitAutograd___sparse_bsr_tensor_unsafe(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
}
at::Tensor _sparse_bsc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
    return wrapper_CompositeImplicitAutograd___sparse_bsc_tensor_unsafe(ccol_indices, row_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor _sparse_bsc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeImplicitAutograd___sparse_bsc_tensor_unsafe(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
}
at::Tensor sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options) {
    return wrapper_CompositeImplicitAutograd_indices_sparse_coo_tensor(indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeImplicitAutograd_indices_sparse_coo_tensor(indices, values, dtype, layout, device, pin_memory);
}
at::Tensor sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
    return wrapper_CompositeImplicitAutograd_indices_size_sparse_coo_tensor(indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeImplicitAutograd_indices_size_sparse_coo_tensor(indices, values, size, dtype, layout, device, pin_memory);
}
at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
    return wrapper_CompositeImplicitAutograd___sparse_coo_tensor_unsafe(indices, values, c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeImplicitAutograd___sparse_coo_tensor_unsafe(indices, values, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
}
at::Tensor _sparse_coo_tensor_unsafe_symint(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options) {
    return wrapper_CompositeImplicitAutograd___sparse_coo_tensor_unsafe(indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor _sparse_coo_tensor_unsafe_symint(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return wrapper_CompositeImplicitAutograd___sparse_coo_tensor_unsafe(indices, values, size, dtype, layout, device, pin_memory);
}
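// NOTE: The _validate_* helpers below return void; they only check that the
// given indices/values/size describe a well-formed sparse tensor.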
void _validate_sparse_coo_tensor_args(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size) {
    return wrapper_CompositeImplicitAutograd___validate_sparse_coo_tensor_args(indices, values, size);
}
void _validate_sparse_compressed_tensor_args(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::Layout layout) {
    return wrapper_CompositeImplicitAutograd___validate_sparse_compressed_tensor_args(compressed_indices, plain_indices, values, size, layout);
}
void _validate_sparse_csr_tensor_args(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) {
    return wrapper_CompositeImplicitAutograd___validate_sparse_csr_tensor_args(crow_indices, col_indices, values, size);
}
void _validate_sparse_csc_tensor_args(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {
    return wrapper_CompositeImplicitAutograd___validate_sparse_csc_tensor_args(ccol_indices, row_indices, values, size);
}
void _validate_sparse_bsr_tensor_args(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) {
    return wrapper_CompositeImplicitAutograd___validate_sparse_bsr_tensor_args(crow_indices, col_indices, values, size);
}
void _validate_sparse_bsc_tensor_args(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {
    return wrapper_CompositeImplicitAutograd___validate_sparse_bsc_tensor_args(ccol_indices, row_indices, values, size);
}
::std::vector<at::Tensor> _to_cpu(at::TensorList tensors) {
    return wrapper_CompositeImplicitAutograd___to_cpu(tensors);
}
at::Tensor to_dense(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd__to_dense(self, dtype);
}
at::Tensor to_dense_backward(const at::Tensor & grad, const at::Tensor & input) {
    return wrapper_CompositeImplicitAutograd__to_dense_backward(grad, input);
}
at::Tensor coalesce(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__coalesce(self);
}
::std::vector<at::Tensor> unbind(const at::Tensor & self, at::Dimname dim) {
    return wrapper_CompositeImplicitAutograd_Dimname_unbind(self, dim);
}
at::Tensor to_mkldnn_backward(const at::Tensor & grad, const at::Tensor & input) {
    return wrapper_CompositeImplicitAutograd__to_mkldnn_backward(grad, input);
}
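// NOTE: fake_quantize_per_tensor_affine takes either scalar qparams or tensor
// qparams; the *_cachemask_backward variants consume the mask saved by the
// corresponding cachemask forward.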
at::Tensor fake_quantize_per_tensor_affine(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
    return wrapper_CompositeImplicitAutograd__fake_quantize_per_tensor_affine(self, scale, zero_point, quant_min, quant_max);
}
at::Tensor fake_quantize_per_tensor_affine(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max) {
    return wrapper_CompositeImplicitAutograd_tensor_qparams_fake_quantize_per_tensor_affine(self, scale, zero_point, quant_min, quant_max);
}
at::Tensor fake_quantize_per_tensor_affine_cachemask_backward(const at::Tensor & grad, const at::Tensor & mask) {
    return wrapper_CompositeImplicitAutograd__fake_quantize_per_tensor_affine_cachemask_backward(grad, mask);
}
at::Tensor fake_quantize_per_channel_affine(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
    return wrapper_CompositeImplicitAutograd__fake_quantize_per_channel_affine(self, scale, zero_point, axis, quant_min, quant_max);
}
at::Tensor fake_quantize_per_channel_affine_cachemask_backward(const at::Tensor & grad, const at::Tensor & mask) {
    return wrapper_CompositeImplicitAutograd__fake_quantize_per_channel_affine_cachemask_backward(grad, mask);
}
at::Tensor fused_moving_avg_obs_fake_quant(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) {
    return wrapper_CompositeImplicitAutograd__fused_moving_avg_obs_fake_quant(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
}
::std::tuple<double,int64_t> _choose_qparams_per_tensor(const at::Tensor & self, bool reduce_range) {
    return wrapper_CompositeImplicitAutograd___choose_qparams_per_tensor(self, reduce_range);
}
at::Tensor _saturate_weight_to_fp16(const at::Tensor & weight) {
    return wrapper_CompositeImplicitAutograd___saturate_weight_to_fp16(weight);
}
::std::tuple<at::Tensor,at::Tensor> choose_qparams_optimized(const at::Tensor & input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width) {
    return wrapper_CompositeImplicitAutograd__choose_qparams_optimized(input, numel, n_bins, ratio, bit_width);
}
at::Tensor _autocast_to_reduced_precision(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled, at::ScalarType cuda_dtype, at::ScalarType cpu_dtype) {
    return wrapper_CompositeImplicitAutograd___autocast_to_reduced_precision(self, cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype);
}
at::Tensor _autocast_to_full_precision(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled) {
    return wrapper_CompositeImplicitAutograd___autocast_to_full_precision(self, cuda_enabled, cpu_enabled);
}
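// NOTE: The TensorOptions overload of to() reconciles an explicitly passed
// memory_format with one possibly embedded in the options via
// check_tensor_options_and_extract_memory_format before forwarding.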
at::Tensor to(const at::Tensor & self, at::TensorOptions options, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
    return wrapper_CompositeImplicitAutograd_dtype_layout_to(self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), non_blocking, copy, c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
at::Tensor to(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
    return wrapper_CompositeImplicitAutograd_dtype_layout_to(self, dtype, layout, device, pin_memory, non_blocking, copy, memory_format);
}
at::Tensor to(const at::Tensor & self, at::Device device, at::ScalarType dtype, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
    return wrapper_CompositeImplicitAutograd_device_to(self, device, dtype, non_blocking, copy, memory_format);
}
at::Tensor to(const at::Tensor & self, at::ScalarType dtype, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
    return wrapper_CompositeImplicitAutograd_dtype_to(self, dtype, non_blocking, copy, memory_format);
}
at::Tensor to(const at::Tensor & self, const at::Tensor & other, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
    return wrapper_CompositeImplicitAutograd_other_to(self, other, non_blocking, copy, memory_format);
}
::std::vector<at::Tensor> meshgrid(at::TensorList tensors) {
    return wrapper_CompositeImplicitAutograd__meshgrid(tensors);
}
::std::vector<at::Tensor> meshgrid(at::TensorList tensors, c10::string_view indexing) {
    return wrapper_CompositeImplicitAutograd_indexing_meshgrid(tensors, indexing);
}
at::Tensor cartesian_prod(at::TensorList tensors) {
    return wrapper_CompositeImplicitAutograd__cartesian_prod(tensors);
}
at::Tensor combinations(const at::Tensor & self, int64_t r, bool with_replacement) {
    return wrapper_CompositeImplicitAutograd__combinations(self, r, with_replacement);
}
at::Scalar item(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__item(self);
}
at::ScalarType result_type(const at::Tensor & tensor, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor_result_type(tensor, other);
}
at::ScalarType result_type(const at::Tensor & tensor, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_result_type(tensor, other);
}
at::ScalarType result_type(const at::Scalar & scalar, const at::Tensor & tensor) {
    return wrapper_CompositeImplicitAutograd_Scalar_Tensor_result_type(scalar, tensor);
}
at::ScalarType result_type(const at::Scalar & scalar1, const at::Scalar & scalar2) {
    return wrapper_CompositeImplicitAutograd_Scalar_Scalar_result_type(scalar1, scalar2);
}
bool can_cast(at::ScalarType from, at::ScalarType to) {
    return wrapper_CompositeImplicitAutograd__can_cast(from, to);
}
at::ScalarType promote_types(at::ScalarType type1, at::ScalarType type2) {
    return wrapper_CompositeImplicitAutograd__promote_types(type1, type2);
}
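// NOTE: Each RNN entry point (lstm/gru/rnn_tanh/rnn_relu) has a padded-input
// overload taking batch_first and a packed-data overload taking
// (data, batch_sizes); the *_cell functions compute a single timestep.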
10648::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
10649return wrapper_CompositeImplicitAutograd___thnn_fused_lstm_cell_backward(grad_hy, grad_cy, cx, cy, workspace, has_bias);
10650}
10651::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_lstm_cell_backward(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, const at::Tensor & cx, const at::Tensor & cy) {
10652return wrapper_CompositeImplicitAutograd___thnn_differentiable_lstm_cell_backward(grad_hy, grad_cy, input_gates, hidden_gates, input_bias, hidden_bias, cx, cy);
10653}
10654::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_gru_cell_backward(const at::Tensor & grad_hy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias) {
10655return wrapper_CompositeImplicitAutograd___thnn_differentiable_gru_cell_backward(grad_hy, input_gates, hidden_gates, hx, input_bias, hidden_bias);
10656}
10657::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
10658return wrapper_CompositeImplicitAutograd_input_lstm(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
10659}
10660::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm(const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
10661return wrapper_CompositeImplicitAutograd_data_lstm(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
10662}
10663::std::tuple<at::Tensor,at::Tensor> gru(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
10664return wrapper_CompositeImplicitAutograd_input_gru(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
10665}
10666::std::tuple<at::Tensor,at::Tensor> gru(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
10667return wrapper_CompositeImplicitAutograd_data_gru(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
10668}
10669::std::tuple<at::Tensor,at::Tensor> rnn_tanh(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
10670return wrapper_CompositeImplicitAutograd_input_rnn_tanh(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
10671}
10672::std::tuple<at::Tensor,at::Tensor> rnn_tanh(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
10673return wrapper_CompositeImplicitAutograd_data_rnn_tanh(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
10674}
10675::std::tuple<at::Tensor,at::Tensor> rnn_relu(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
10676return wrapper_CompositeImplicitAutograd_input_rnn_relu(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
10677}
10678::std::tuple<at::Tensor,at::Tensor> rnn_relu(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
10679return wrapper_CompositeImplicitAutograd_data_rnn_relu(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
10680}
10681::std::tuple<at::Tensor,at::Tensor> lstm_cell(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
10682return wrapper_CompositeImplicitAutograd__lstm_cell(input, hx, w_ih, w_hh, b_ih, b_hh);
10683}
10684at::Tensor gru_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
10685return wrapper_CompositeImplicitAutograd__gru_cell(input, hx, w_ih, w_hh, b_ih, b_hh);
10686}
10687at::Tensor rnn_tanh_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
10688return wrapper_CompositeImplicitAutograd__rnn_tanh_cell(input, hx, w_ih, w_hh, b_ih, b_hh);
10689}
10690at::Tensor rnn_relu_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
10691return wrapper_CompositeImplicitAutograd__rnn_relu_cell(input, hx, w_ih, w_hh, b_ih, b_hh);
10692}
::std::tuple<at::Tensor,at::Tensor> quantized_lstm_cell(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
return wrapper_CompositeImplicitAutograd__quantized_lstm_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}
at::Tensor quantized_gru_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
return wrapper_CompositeImplicitAutograd__quantized_gru_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}
at::Tensor quantized_rnn_relu_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
return wrapper_CompositeImplicitAutograd__quantized_rnn_relu_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}
at::Tensor quantized_rnn_tanh_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
return wrapper_CompositeImplicitAutograd__quantized_rnn_tanh_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}
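// A pattern that recurs throughout the rest of this file: ops taking integer
// sizes come in pairs. The plain IntArrayRef overload widens its arguments to
// SymInts via c10::fromIntArrayRefSlow, while the *_symint overload forwards
// them unchanged; both funnel into the same SymInt-typed wrapper.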
at::Tensor _pack_padded_sequence_backward(const at::Tensor & grad, at::IntArrayRef input_size, const at::Tensor & batch_sizes, bool batch_first) {
return wrapper_CompositeImplicitAutograd___pack_padded_sequence_backward(grad, c10::fromIntArrayRefSlow(input_size), batch_sizes, batch_first);
}
at::Tensor _pack_padded_sequence_backward_symint(const at::Tensor & grad, c10::SymIntArrayRef input_size, const at::Tensor & batch_sizes, bool batch_first) {
return wrapper_CompositeImplicitAutograd___pack_padded_sequence_backward(grad, input_size, batch_sizes, batch_first);
}
::std::tuple<at::Tensor,at::Tensor> _pad_packed_sequence(const at::Tensor & data, const at::Tensor & batch_sizes, bool batch_first, const at::Scalar & padding_value, int64_t total_length) {
return wrapper_CompositeImplicitAutograd___pad_packed_sequence(data, batch_sizes, batch_first, padding_value, total_length);
}
at::Tensor & set_(at::Tensor & self, const at::Tensor & source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride) {
return wrapper_CompositeImplicitAutograd_source_Tensor_storage_offset_set_(self, source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
}
at::Tensor & set__symint(at::Tensor & self, const at::Tensor & source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
return wrapper_CompositeImplicitAutograd_source_Tensor_storage_offset_set_(self, source, storage_offset, size, stride);
}
at::Tensor index_add(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
return wrapper_CompositeImplicitAutograd_dimname_index_add(self, dim, index, source, alpha);
}
at::Tensor & index_fill_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
return wrapper_CompositeImplicitAutograd_Dimname_Scalar_index_fill_(self, dim, index, value);
}
at::Tensor index_fill(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
return wrapper_CompositeImplicitAutograd_Dimname_Scalar_index_fill(self, dim, index, value);
}
at::Tensor & index_fill_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) {
return wrapper_CompositeImplicitAutograd_Dimname_Tensor_index_fill_(self, dim, index, value);
}
at::Tensor index_fill(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) {
return wrapper_CompositeImplicitAutograd_Dimname_Tensor_index_fill(self, dim, index, value);
}
at::Tensor scatter(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) {
return wrapper_CompositeImplicitAutograd_dimname_src_scatter(self, dim, index, src);
}
at::Tensor scatter(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
return wrapper_CompositeImplicitAutograd_dimname_value_scatter(self, dim, index, value);
}
at::Tensor scatter_add(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) {
return wrapper_CompositeImplicitAutograd_dimname_scatter_add(self, dim, index, src);
}
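// Entry points backing the Python bitwise operators (__and__, __iand__,
// __or__, __ior__, __xor__, __ixor__); in ATen these are thin aliases of
// bitwise_and, bitwise_or, and bitwise_xor.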
at::Tensor & bitwise_and_(at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeImplicitAutograd_Scalar_bitwise_and_(self, other);
}
at::Tensor __and__(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeImplicitAutograd_Scalar___and__(self, other);
}
at::Tensor & __iand__(at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeImplicitAutograd_Scalar___iand__(self, other);
}
at::Tensor __and__(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_Tensor___and__(self, other);
}
at::Tensor & __iand__(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_Tensor___iand__(self, other);
}
at::Tensor bitwise_or(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeImplicitAutograd_Scalar_bitwise_or(self, other);
}
at::Tensor & bitwise_or_(at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeImplicitAutograd_Scalar_bitwise_or_(self, other);
}
at::Tensor __or__(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeImplicitAutograd_Scalar___or__(self, other);
}
at::Tensor & __ior__(at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeImplicitAutograd_Scalar___ior__(self, other);
}
at::Tensor __or__(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_Tensor___or__(self, other);
}
at::Tensor & __ior__(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_Tensor___ior__(self, other);
}
at::Tensor bitwise_xor(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeImplicitAutograd_Scalar_bitwise_xor(self, other);
}
at::Tensor & bitwise_xor_(at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeImplicitAutograd_Scalar_bitwise_xor_(self, other);
}
at::Tensor __xor__(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeImplicitAutograd_Scalar___xor__(self, other);
}
at::Tensor & __ixor__(at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeImplicitAutograd_Scalar___ixor__(self, other);
}
at::Tensor __xor__(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_Tensor___xor__(self, other);
}
at::Tensor & __ixor__(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_Tensor___ixor__(self, other);
}
at::Tensor diag(const at::Tensor & self, int64_t diagonal) {
return wrapper_CompositeImplicitAutograd__diag(self, diagonal);
}
at::Tensor & diag_out(at::Tensor & out, const at::Tensor & self, int64_t diagonal) {
return wrapper_CompositeImplicitAutograd_out_diag_out(self, diagonal, out);
}
at::Tensor & diag_outf(const at::Tensor & self, int64_t diagonal, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_diag_out(self, diagonal, out);
}
at::Tensor cross(const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim) {
return wrapper_CompositeImplicitAutograd__cross(self, other, dim);
}
at::Tensor & cross_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim) {
return wrapper_CompositeImplicitAutograd_out_cross_out(self, other, dim, out);
}
at::Tensor & cross_outf(const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_cross_out(self, other, dim, out);
}
at::Tensor trace_backward(const at::Tensor & grad, at::IntArrayRef sizes) {
return wrapper_CompositeImplicitAutograd__trace_backward(grad, c10::fromIntArrayRefSlow(sizes));
}
at::Tensor trace_backward_symint(const at::Tensor & grad, c10::SymIntArrayRef sizes) {
return wrapper_CompositeImplicitAutograd__trace_backward(grad, sizes);
}
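// NumPy-style comparison aliases: not_equal, greater_equal, less_equal,
// greater, and less mirror ne, ge, le, gt, and lt respectively, including
// their out and in-place variants.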
at::Tensor not_equal(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeImplicitAutograd_Scalar_not_equal(self, other);
}
at::Tensor & not_equal_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeImplicitAutograd_Scalar_out_not_equal_out(self, other, out);
}
at::Tensor & not_equal_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_Scalar_out_not_equal_out(self, other, out);
}
at::Tensor & not_equal_(at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeImplicitAutograd_Scalar_not_equal_(self, other);
}
at::Tensor not_equal(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_Tensor_not_equal(self, other);
}
at::Tensor & not_equal_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_Tensor_out_not_equal_out(self, other, out);
}
at::Tensor & not_equal_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_Tensor_out_not_equal_out(self, other, out);
}
at::Tensor & not_equal_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_Tensor_not_equal_(self, other);
}
at::Tensor greater_equal(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeImplicitAutograd_Scalar_greater_equal(self, other);
}
at::Tensor & greater_equal_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeImplicitAutograd_Scalar_out_greater_equal_out(self, other, out);
}
at::Tensor & greater_equal_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_Scalar_out_greater_equal_out(self, other, out);
}
at::Tensor & greater_equal_(at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeImplicitAutograd_Scalar_greater_equal_(self, other);
}
at::Tensor greater_equal(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_Tensor_greater_equal(self, other);
}
at::Tensor & greater_equal_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_Tensor_out_greater_equal_out(self, other, out);
}
at::Tensor & greater_equal_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_Tensor_out_greater_equal_out(self, other, out);
}
at::Tensor & greater_equal_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_Tensor_greater_equal_(self, other);
}
at::Tensor less_equal(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeImplicitAutograd_Scalar_less_equal(self, other);
}
at::Tensor & less_equal_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeImplicitAutograd_Scalar_out_less_equal_out(self, other, out);
}
at::Tensor & less_equal_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_Scalar_out_less_equal_out(self, other, out);
}
at::Tensor & less_equal_(at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeImplicitAutograd_Scalar_less_equal_(self, other);
}
at::Tensor less_equal(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_Tensor_less_equal(self, other);
}
at::Tensor & less_equal_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_Tensor_out_less_equal_out(self, other, out);
}
at::Tensor & less_equal_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_Tensor_out_less_equal_out(self, other, out);
}
at::Tensor & less_equal_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_Tensor_less_equal_(self, other);
}
at::Tensor greater(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeImplicitAutograd_Scalar_greater(self, other);
}
at::Tensor & greater_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeImplicitAutograd_Scalar_out_greater_out(self, other, out);
}
at::Tensor & greater_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_Scalar_out_greater_out(self, other, out);
}
at::Tensor & greater_(at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeImplicitAutograd_Scalar_greater_(self, other);
}
at::Tensor greater(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_Tensor_greater(self, other);
}
at::Tensor & greater_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_Tensor_out_greater_out(self, other, out);
}
at::Tensor & greater_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_Tensor_out_greater_out(self, other, out);
}
at::Tensor & greater_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_Tensor_greater_(self, other);
}
at::Tensor less(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeImplicitAutograd_Scalar_less(self, other);
}
at::Tensor & less_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeImplicitAutograd_Scalar_out_less_out(self, other, out);
}
at::Tensor & less_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_Scalar_out_less_out(self, other, out);
}
at::Tensor & less_(at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeImplicitAutograd_Scalar_less_(self, other);
}
at::Tensor less(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_Tensor_less(self, other);
}
at::Tensor & less_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_Tensor_out_less_out(self, other, out);
}
at::Tensor & less_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_Tensor_out_less_out(self, other, out);
}
at::Tensor & less_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_Tensor_less_(self, other);
}
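// Indexing helpers: take_along_dim is the NumPy-style companion to gather,
// and the Dimname overloads of index_select/gather resolve a named dimension
// before dispatching to the positional kernels.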
at::Tensor take_along_dim(const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim) {
return wrapper_CompositeImplicitAutograd__take_along_dim(self, indices, dim);
}
at::Tensor & take_along_dim_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim) {
return wrapper_CompositeImplicitAutograd_out_take_along_dim_out(self, indices, dim, out);
}
at::Tensor & take_along_dim_outf(const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_take_along_dim_out(self, indices, dim, out);
}
at::Tensor index_select(const at::Tensor & self, at::Dimname dim, const at::Tensor & index) {
return wrapper_CompositeImplicitAutograd_dimname_index_select(self, dim, index);
}
at::Tensor & index_select_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, const at::Tensor & index) {
return wrapper_CompositeImplicitAutograd_dimname_out_index_select_out(self, dim, index, out);
}
at::Tensor & index_select_outf(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_dimname_out_index_select_out(self, dim, index, out);
}
at::Tensor index_select_backward(const at::Tensor & grad, at::IntArrayRef self_sizes, int64_t dim, const at::Tensor & index) {
return wrapper_CompositeImplicitAutograd__index_select_backward(grad, c10::fromIntArrayRefSlow(self_sizes), dim, index);
}
at::Tensor index_select_backward_symint(const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index) {
return wrapper_CompositeImplicitAutograd__index_select_backward(grad, self_sizes, dim, index);
}
at::Tensor masked_select_backward(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & mask) {
return wrapper_CompositeImplicitAutograd__masked_select_backward(grad, input, mask);
}
::std::vector<at::Tensor> nonzero_numpy(const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd__nonzero_numpy(self);
}
at::Tensor argwhere(const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd__argwhere(self);
}
at::Tensor gather_backward(const at::Tensor & grad, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
return wrapper_CompositeImplicitAutograd__gather_backward(grad, self, dim, index, sparse_grad);
}
at::Tensor gather(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad) {
return wrapper_CompositeImplicitAutograd_dimname_gather(self, dim, index, sparse_grad);
}
at::Tensor & gather_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad) {
return wrapper_CompositeImplicitAutograd_dimname_out_gather_out(self, dim, index, sparse_grad, out);
}
at::Tensor & gather_outf(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_dimname_out_gather_out(self, dim, index, sparse_grad, out);
}
at::Tensor _gather_sparse_backward(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & grad) {
return wrapper_CompositeImplicitAutograd___gather_sparse_backward(self, dim, index, grad);
}
at::Tensor cross_entropy_loss(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, double label_smoothing) {
return wrapper_CompositeImplicitAutograd__cross_entropy_loss(self, target, weight, reduction, ignore_index, label_smoothing);
}
at::Tensor cross_entropy_loss_symint(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, double label_smoothing) {
return wrapper_CompositeImplicitAutograd__cross_entropy_loss(self, target, weight, reduction, ignore_index, label_smoothing);
}
at::Tensor linalg_vander(const at::Tensor & x, c10::optional<int64_t> N) {
return wrapper_CompositeImplicitAutograd__linalg_vander(x, N);
}
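// Legacy decomposition entry points (svd, qr, orgqr, lu_solve) retained for
// backward compatibility; they are largely superseded by their torch.linalg
// counterparts.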
::std::tuple<at::Tensor,at::Tensor,at::Tensor> svd(const at::Tensor & self, bool some, bool compute_uv) {
return wrapper_CompositeImplicitAutograd__svd(self, some, compute_uv);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> svd_out(at::Tensor & U, at::Tensor & S, at::Tensor & V, const at::Tensor & self, bool some, bool compute_uv) {
return wrapper_CompositeImplicitAutograd_U_svd_out(self, some, compute_uv, U, S, V);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> svd_outf(const at::Tensor & self, bool some, bool compute_uv, at::Tensor & U, at::Tensor & S, at::Tensor & V) {
return wrapper_CompositeImplicitAutograd_U_svd_out(self, some, compute_uv, U, S, V);
}
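// swapaxes/swapdims are NumPy-compatible aliases of transpose.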
at::Tensor swapaxes(const at::Tensor & self, int64_t axis0, int64_t axis1) {
return wrapper_CompositeImplicitAutograd__swapaxes(self, axis0, axis1);
}
at::Tensor & swapaxes_(at::Tensor & self, int64_t axis0, int64_t axis1) {
return wrapper_CompositeImplicitAutograd__swapaxes_(self, axis0, axis1);
}
at::Tensor swapdims(const at::Tensor & self, int64_t dim0, int64_t dim1) {
return wrapper_CompositeImplicitAutograd__swapdims(self, dim0, dim1);
}
at::Tensor & swapdims_(at::Tensor & self, int64_t dim0, int64_t dim1) {
return wrapper_CompositeImplicitAutograd__swapdims_(self, dim0, dim1);
}
::std::tuple<at::Tensor,at::Tensor> qr(const at::Tensor & self, bool some) {
return wrapper_CompositeImplicitAutograd__qr(self, some);
}
::std::tuple<at::Tensor &,at::Tensor &> qr_out(at::Tensor & Q, at::Tensor & R, const at::Tensor & self, bool some) {
return wrapper_CompositeImplicitAutograd_Q_qr_out(self, some, Q, R);
}
::std::tuple<at::Tensor &,at::Tensor &> qr_outf(const at::Tensor & self, bool some, at::Tensor & Q, at::Tensor & R) {
return wrapper_CompositeImplicitAutograd_Q_qr_out(self, some, Q, R);
}
at::Tensor orgqr(const at::Tensor & self, const at::Tensor & input2) {
return wrapper_CompositeImplicitAutograd__orgqr(self, input2);
}
at::Tensor & orgqr_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & input2) {
return wrapper_CompositeImplicitAutograd_out_orgqr_out(self, input2, out);
}
at::Tensor & orgqr_outf(const at::Tensor & self, const at::Tensor & input2, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_orgqr_out(self, input2, out);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _lu_with_info(const at::Tensor & self, bool pivot, bool check_errors) {
return wrapper_CompositeImplicitAutograd___lu_with_info(self, pivot, check_errors);
}
at::Tensor lu_solve(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) {
return wrapper_CompositeImplicitAutograd__lu_solve(self, LU_data, LU_pivots);
}
at::Tensor & lu_solve_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) {
return wrapper_CompositeImplicitAutograd_out_lu_solve_out(self, LU_data, LU_pivots, out);
}
at::Tensor & lu_solve_outf(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_lu_solve_out(self, LU_data, LU_pivots, out);
}
at::Tensor arctan2(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd__arctan2(self, other);
}
at::Tensor & arctan2_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_out_arctan2_out(self, other, out);
}
at::Tensor & arctan2_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_arctan2_out(self, other, out);
}
at::Tensor & arctan2_(at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd__arctan2_(self, other);
}
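// histogramdd comes in three overloads distinguished by how the bins are
// specified: a per-dimension bin count (IntArrayRef), a single count shared
// by every dimension (int64_t), or explicit bin edges per dimension
// (TensorList).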
::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
return wrapper_CompositeImplicitAutograd__histogramdd(self, bins, range, weight, density);
}
::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd(const at::Tensor & self, int64_t bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
return wrapper_CompositeImplicitAutograd_int_bins_histogramdd(self, bins, range, weight, density);
}
::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd(const at::Tensor & self, at::TensorList bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
return wrapper_CompositeImplicitAutograd_TensorList_bins_histogramdd(self, bins, range, weight, density);
}
at::Tensor max(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_other_max(self, other);
}
at::Tensor & max_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_out_max_out(self, other, out);
}
at::Tensor & max_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_max_out(self, other, out);
}
at::Tensor min(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_other_min(self, other);
}
at::Tensor & min_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_out_min_out(self, other, out);
}
at::Tensor & min_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_min_out(self, other, out);
}
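// quantile/nanquantile each accept q either as a tensor of quantiles or as a
// single double, with an interpolation string (e.g. "linear", "nearest")
// selecting how values between data points are computed.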
at::Tensor quantile(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
return wrapper_CompositeImplicitAutograd__quantile(self, q, dim, keepdim, interpolation);
}
at::Tensor & quantile_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
return wrapper_CompositeImplicitAutograd_out_quantile_out(self, q, dim, keepdim, interpolation, out);
}
at::Tensor & quantile_outf(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_quantile_out(self, q, dim, keepdim, interpolation, out);
}
at::Tensor quantile(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
return wrapper_CompositeImplicitAutograd_scalar_quantile(self, q, dim, keepdim, interpolation);
}
at::Tensor & quantile_out(at::Tensor & out, const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
return wrapper_CompositeImplicitAutograd_scalar_out_quantile_out(self, q, dim, keepdim, interpolation, out);
}
at::Tensor & quantile_outf(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_scalar_out_quantile_out(self, q, dim, keepdim, interpolation, out);
}
at::Tensor nanquantile(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
return wrapper_CompositeImplicitAutograd__nanquantile(self, q, dim, keepdim, interpolation);
}
at::Tensor & nanquantile_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
return wrapper_CompositeImplicitAutograd_out_nanquantile_out(self, q, dim, keepdim, interpolation, out);
}
at::Tensor & nanquantile_outf(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_nanquantile_out(self, q, dim, keepdim, interpolation, out);
}
at::Tensor nanquantile(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
return wrapper_CompositeImplicitAutograd_scalar_nanquantile(self, q, dim, keepdim, interpolation);
}
at::Tensor & nanquantile_out(at::Tensor & out, const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
return wrapper_CompositeImplicitAutograd_scalar_out_nanquantile_out(self, q, dim, keepdim, interpolation, out);
}
at::Tensor & nanquantile_outf(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_scalar_out_nanquantile_out(self, q, dim, keepdim, interpolation, out);
}
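// Dimname overloads of sort, with and without the optional stable flag that
// requests a stability-preserving ordering of equivalent elements.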
::std::tuple<at::Tensor,at::Tensor> sort(const at::Tensor & self, at::Dimname dim, bool descending) {
return wrapper_CompositeImplicitAutograd_dimname_sort(self, dim, descending);
}
::std::tuple<at::Tensor &,at::Tensor &> sort_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim, bool descending) {
return wrapper_CompositeImplicitAutograd_dimname_values_sort_out(self, dim, descending, values, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> sort_outf(const at::Tensor & self, at::Dimname dim, bool descending, at::Tensor & values, at::Tensor & indices) {
return wrapper_CompositeImplicitAutograd_dimname_values_sort_out(self, dim, descending, values, indices);
}
::std::tuple<at::Tensor,at::Tensor> sort(const at::Tensor & self, c10::optional<bool> stable, at::Dimname dim, bool descending) {
return wrapper_CompositeImplicitAutograd_dimname_stable_sort(self, stable, dim, descending);
}
::std::tuple<at::Tensor &,at::Tensor &> sort_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, c10::optional<bool> stable, at::Dimname dim, bool descending) {
return wrapper_CompositeImplicitAutograd_dimname_values_stable_sort_out(self, stable, dim, descending, values, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> sort_outf(const at::Tensor & self, c10::optional<bool> stable, at::Dimname dim, bool descending, at::Tensor & values, at::Tensor & indices) {
return wrapper_CompositeImplicitAutograd_dimname_values_stable_sort_out(self, stable, dim, descending, values, indices);
}
at::Tensor msort(const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd__msort(self);
}
at::Tensor & msort_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd_out_msort_out(self, out);
}
at::Tensor & msort_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_msort_out(self, out);
}
at::Tensor argsort(const at::Tensor & self, int64_t dim, bool descending) {
return wrapper_CompositeImplicitAutograd__argsort(self, dim, descending);
}
at::Tensor argsort(const at::Tensor & self, at::Dimname dim, bool descending) {
return wrapper_CompositeImplicitAutograd_dimname_argsort(self, dim, descending);
}
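// float_power computes its result in double precision (float64, or
// complex128 for complex inputs) regardless of the input dtypes.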
at::Tensor float_power(const at::Tensor & self, const at::Tensor & exponent) {
return wrapper_CompositeImplicitAutograd_Tensor_Tensor_float_power(self, exponent);
}
at::Tensor & float_power_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & exponent) {
return wrapper_CompositeImplicitAutograd_Tensor_Tensor_out_float_power_out(self, exponent, out);
}
at::Tensor & float_power_outf(const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_Tensor_Tensor_out_float_power_out(self, exponent, out);
}
at::Tensor & float_power_(at::Tensor & self, const at::Tensor & exponent) {
return wrapper_CompositeImplicitAutograd_Tensor_float_power_(self, exponent);
}
at::Tensor float_power(const at::Scalar & self, const at::Tensor & exponent) {
return wrapper_CompositeImplicitAutograd_Scalar_float_power(self, exponent);
}
at::Tensor & float_power_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & exponent) {
return wrapper_CompositeImplicitAutograd_Scalar_out_float_power_out(self, exponent, out);
}
at::Tensor & float_power_outf(const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_Scalar_out_float_power_out(self, exponent, out);
}
at::Tensor float_power(const at::Tensor & self, const at::Scalar & exponent) {
return wrapper_CompositeImplicitAutograd_Tensor_Scalar_float_power(self, exponent);
}
at::Tensor & float_power_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & exponent) {
return wrapper_CompositeImplicitAutograd_Tensor_Scalar_out_float_power_out(self, exponent, out);
}
at::Tensor & float_power_outf(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_Tensor_Scalar_out_float_power_out(self, exponent, out);
}
at::Tensor & float_power_(at::Tensor & self, const at::Scalar & exponent) {
return wrapper_CompositeImplicitAutograd_Scalar_float_power_(self, exponent);
}
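// Loss wrappers; the nll_loss family additionally carries *_symint variants
// whose ignore_index is a c10::SymInt.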
at::Tensor l1_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
return wrapper_CompositeImplicitAutograd__l1_loss(self, target, reduction);
}
at::Tensor multilabel_margin_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
return wrapper_CompositeImplicitAutograd__multilabel_margin_loss(self, target, reduction);
}
at::Tensor & multilabel_margin_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
return wrapper_CompositeImplicitAutograd_out_multilabel_margin_loss_out(self, target, reduction, out);
}
at::Tensor & multilabel_margin_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_multilabel_margin_loss_out(self, target, reduction, out);
}
at::Tensor nll_loss(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
return wrapper_CompositeImplicitAutograd__nll_loss(self, target, weight, reduction, ignore_index);
}
at::Tensor nll_loss_symint(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
return wrapper_CompositeImplicitAutograd__nll_loss(self, target, weight, reduction, ignore_index);
}
at::Tensor & nll_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
return wrapper_CompositeImplicitAutograd_out_nll_loss_out(self, target, weight, reduction, ignore_index, out);
}
at::Tensor & nll_loss_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_nll_loss_out(self, target, weight, reduction, ignore_index, out);
}
at::Tensor & nll_loss_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
return wrapper_CompositeImplicitAutograd_out_nll_loss_out(self, target, weight, reduction, ignore_index, out);
}
at::Tensor & nll_loss_symint_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_nll_loss_out(self, target, weight, reduction, ignore_index, out);
}
at::Tensor nll_loss_nd(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
return wrapper_CompositeImplicitAutograd__nll_loss_nd(self, target, weight, reduction, ignore_index);
}
at::Tensor nll_loss_nd_symint(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
return wrapper_CompositeImplicitAutograd__nll_loss_nd(self, target, weight, reduction, ignore_index);
}
at::Tensor nll_loss2d(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
return wrapper_CompositeImplicitAutograd__nll_loss2d(self, target, weight, reduction, ignore_index);
}
at::Tensor nll_loss2d_symint(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
return wrapper_CompositeImplicitAutograd__nll_loss2d(self, target, weight, reduction, ignore_index);
}
at::Tensor & nll_loss2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
return wrapper_CompositeImplicitAutograd_out_nll_loss2d_out(self, target, weight, reduction, ignore_index, out);
}
at::Tensor & nll_loss2d_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_nll_loss2d_out(self, target, weight, reduction, ignore_index, out);
}
at::Tensor & nll_loss2d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
return wrapper_CompositeImplicitAutograd_out_nll_loss2d_out(self, target, weight, reduction, ignore_index, out);
}
at::Tensor & nll_loss2d_symint_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_nll_loss2d_out(self, target, weight, reduction, ignore_index, out);
}
at::Tensor log_sigmoid(const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd__log_sigmoid(self);
}
at::Tensor & log_sigmoid_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd_out_log_sigmoid_out(self, out);
}
at::Tensor & log_sigmoid_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_log_sigmoid_out(self, out);
}
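// Padding entry points: pad dispatches on a mode string ("constant",
// "reflect", "replicate", "circular"), while _pad_enum takes the mode as an
// integer and _pad_circular covers the circular case.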
at::Tensor adaptive_avg_pool2d(const at::Tensor & self, at::IntArrayRef output_size) {
return wrapper_CompositeImplicitAutograd__adaptive_avg_pool2d(self, c10::fromIntArrayRefSlow(output_size));
}
at::Tensor adaptive_avg_pool2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size) {
return wrapper_CompositeImplicitAutograd__adaptive_avg_pool2d(self, output_size);
}
at::Tensor adaptive_avg_pool3d(const at::Tensor & self, at::IntArrayRef output_size) {
return wrapper_CompositeImplicitAutograd__adaptive_avg_pool3d(self, c10::fromIntArrayRefSlow(output_size));
}
at::Tensor adaptive_avg_pool3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size) {
return wrapper_CompositeImplicitAutograd__adaptive_avg_pool3d(self, output_size);
}
at::Tensor _pad_circular(const at::Tensor & self, at::IntArrayRef pad) {
return wrapper_CompositeImplicitAutograd___pad_circular(self, c10::fromIntArrayRefSlow(pad));
}
at::Tensor _pad_circular_symint(const at::Tensor & self, c10::SymIntArrayRef pad) {
return wrapper_CompositeImplicitAutograd___pad_circular(self, pad);
}
at::Tensor _pad_enum(const at::Tensor & self, at::IntArrayRef pad, int64_t mode, c10::optional<double> value) {
return wrapper_CompositeImplicitAutograd___pad_enum(self, c10::fromIntArrayRefSlow(pad), mode, value);
}
at::Tensor _pad_enum_symint(const at::Tensor & self, c10::SymIntArrayRef pad, int64_t mode, c10::optional<double> value) {
return wrapper_CompositeImplicitAutograd___pad_enum(self, pad, mode, value);
}
at::Tensor pad(const at::Tensor & self, at::IntArrayRef pad, c10::string_view mode, c10::optional<double> value) {
return wrapper_CompositeImplicitAutograd__pad(self, c10::fromIntArrayRefSlow(pad), mode, value);
}
at::Tensor pad_symint(const at::Tensor & self, c10::SymIntArrayRef pad, c10::string_view mode, c10::optional<double> value) {
return wrapper_CompositeImplicitAutograd__pad(self, pad, mode, value);
}
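// The *.vec upsample overloads take an optional output_size. The IntArrayRef
// form expands
//   output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt
// so an absent size stays absent while a present one is widened to SymInts.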
at::Tensor upsample_linear1d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
return wrapper_CompositeImplicitAutograd_vec_upsample_linear1d(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors);
}
at::Tensor upsample_linear1d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
return wrapper_CompositeImplicitAutograd_vec_upsample_linear1d(input, output_size, align_corners, scale_factors);
}
at::Tensor upsample_bilinear2d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
return wrapper_CompositeImplicitAutograd_vec_upsample_bilinear2d(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors);
}
at::Tensor upsample_bilinear2d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
return wrapper_CompositeImplicitAutograd_vec_upsample_bilinear2d(input, output_size, align_corners, scale_factors);
}
at::Tensor _upsample_bilinear2d_aa(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
return wrapper_CompositeImplicitAutograd_vec__upsample_bilinear2d_aa(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors);
}
at::Tensor _upsample_bilinear2d_aa_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
return wrapper_CompositeImplicitAutograd_vec__upsample_bilinear2d_aa(input, output_size, align_corners, scale_factors);
}
at::Tensor upsample_trilinear3d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
return wrapper_CompositeImplicitAutograd_vec_upsample_trilinear3d(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors);
}
at::Tensor upsample_trilinear3d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
return wrapper_CompositeImplicitAutograd_vec_upsample_trilinear3d(input, output_size, align_corners, scale_factors);
}
at::Tensor upsample_bicubic2d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
return wrapper_CompositeImplicitAutograd_vec_upsample_bicubic2d(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors);
}
at::Tensor upsample_bicubic2d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
return wrapper_CompositeImplicitAutograd_vec_upsample_bicubic2d(input, output_size, align_corners, scale_factors);
}
at::Tensor _upsample_bicubic2d_aa(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
return wrapper_CompositeImplicitAutograd_vec__upsample_bicubic2d_aa(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors);
}
at::Tensor _upsample_bicubic2d_aa_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
return wrapper_CompositeImplicitAutograd_vec__upsample_bicubic2d_aa(input, output_size, align_corners, scale_factors);
}
at::Tensor upsample_nearest1d(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
return wrapper_CompositeImplicitAutograd_vec_upsample_nearest1d(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors);
}
at::Tensor upsample_nearest1d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
return wrapper_CompositeImplicitAutograd_vec_upsample_nearest1d(input, output_size, scale_factors);
}
at::Tensor _upsample_nearest_exact1d(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
return wrapper_CompositeImplicitAutograd_vec__upsample_nearest_exact1d(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors);
}
at::Tensor _upsample_nearest_exact1d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
return wrapper_CompositeImplicitAutograd_vec__upsample_nearest_exact1d(input, output_size, scale_factors);
}
at::Tensor upsample_nearest2d(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
return wrapper_CompositeImplicitAutograd_vec_upsample_nearest2d(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors);
}
at::Tensor upsample_nearest2d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
return wrapper_CompositeImplicitAutograd_vec_upsample_nearest2d(input, output_size, scale_factors);
}
at::Tensor _upsample_nearest_exact2d(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
return wrapper_CompositeImplicitAutograd_vec__upsample_nearest_exact2d(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors);
}
at::Tensor _upsample_nearest_exact2d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
return wrapper_CompositeImplicitAutograd_vec__upsample_nearest_exact2d(input, output_size, scale_factors);
}
at::Tensor upsample_nearest3d(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
return wrapper_CompositeImplicitAutograd_vec_upsample_nearest3d(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors);
}
at::Tensor upsample_nearest3d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
return wrapper_CompositeImplicitAutograd_vec_upsample_nearest3d(input, output_size, scale_factors);
}
at::Tensor _upsample_nearest_exact3d(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
return wrapper_CompositeImplicitAutograd_vec__upsample_nearest_exact3d(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors);
}
at::Tensor _upsample_nearest_exact3d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
return wrapper_CompositeImplicitAutograd_vec__upsample_nearest_exact3d(input, output_size, scale_factors);
}
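// thnn_conv2d and slow_conv3d are the slow reference convolution entry
// points; the IntArrayRef/_symint pairing for slow_conv3d's padding follows
// the same widening pattern as above.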
at::Tensor thnn_conv2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
return wrapper_CompositeImplicitAutograd__thnn_conv2d(self, weight, kernel_size, bias, stride, padding);
}
at::Tensor & thnn_conv2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
return wrapper_CompositeImplicitAutograd_out_thnn_conv2d_out(self, weight, kernel_size, bias, stride, padding, out);
}
at::Tensor & thnn_conv2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_thnn_conv2d_out(self, weight, kernel_size, bias, stride, padding, out);
}
at::Tensor slow_conv3d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
return wrapper_CompositeImplicitAutograd__slow_conv3d(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding));
}
at::Tensor slow_conv3d_symint(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding) {
return wrapper_CompositeImplicitAutograd__slow_conv3d(self, weight, kernel_size, bias, stride, padding);
}
at::Tensor & slow_conv3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
return wrapper_CompositeImplicitAutograd_out_slow_conv3d_out(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), out);
}
at::Tensor & slow_conv3d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_slow_conv3d_out(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), out);
}
at::Tensor & slow_conv3d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding) {
return wrapper_CompositeImplicitAutograd_out_slow_conv3d_out(self, weight, kernel_size, bias, stride, padding, out);
}
at::Tensor & slow_conv3d_symint_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_slow_conv3d_out(self, weight, kernel_size, bias, stride, padding, out);
}
at::Tensor column_stack(at::TensorList tensors) {
return wrapper_CompositeImplicitAutograd__column_stack(tensors);
}
at::Tensor & column_stack_out(at::Tensor & out, at::TensorList tensors) {
return wrapper_CompositeImplicitAutograd_out_column_stack_out(tensors, out);
}
at::Tensor & column_stack_outf(at::TensorList tensors, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_column_stack_out(tensors, out);
}
at::Tensor isfinite(const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd__isfinite(self);
}
at::Tensor _add_batch_dim(const at::Tensor & self, int64_t batch_dim, int64_t level) {
return wrapper_CompositeImplicitAutograd___add_batch_dim(self, batch_dim, level);
}
at::Tensor _remove_batch_dim(const at::Tensor & self, int64_t level, int64_t batch_size, int64_t out_dim) {
return wrapper_CompositeImplicitAutograd___remove_batch_dim(self, level, batch_size, out_dim);
}
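// torch.special surface: most of the special_* ops below alias the
// corresponding base functions (e.g. special_expm1 ~ expm1, special_psi and
// special_digamma ~ digamma, special_gammaln ~ lgamma, special_i0 ~ i0).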
11398at::Tensor special_expm1(const at::Tensor & self) {
11399return wrapper_CompositeImplicitAutograd__special_expm1(self);
11400}
11401at::Tensor & special_expm1_out(at::Tensor & out, const at::Tensor & self) {
11402return wrapper_CompositeImplicitAutograd_out_special_expm1_out(self, out);
11403}
11404at::Tensor & special_expm1_outf(const at::Tensor & self, at::Tensor & out) {
11405return wrapper_CompositeImplicitAutograd_out_special_expm1_out(self, out);
11406}
at::Tensor special_exp2(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__special_exp2(self);
}
at::Tensor & special_exp2_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd_out_special_exp2_out(self, out);
}
at::Tensor & special_exp2_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_special_exp2_out(self, out);
}
at::Tensor special_psi(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__special_psi(self);
}
at::Tensor & special_psi_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd_out_special_psi_out(self, out);
}
at::Tensor & special_psi_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_special_psi_out(self, out);
}
at::Tensor special_digamma(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__special_digamma(self);
}
at::Tensor & special_digamma_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd_out_special_digamma_out(self, out);
}
at::Tensor & special_digamma_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_special_digamma_out(self, out);
}
at::Tensor special_gammaln(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__special_gammaln(self);
}
at::Tensor & special_gammaln_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd_out_special_gammaln_out(self, out);
}
at::Tensor & special_gammaln_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_special_gammaln_out(self, out);
}
at::Tensor special_erf(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__special_erf(self);
}
at::Tensor & special_erf_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd_out_special_erf_out(self, out);
}
at::Tensor & special_erf_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_special_erf_out(self, out);
}
at::Tensor special_erfc(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__special_erfc(self);
}
at::Tensor & special_erfc_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd_out_special_erfc_out(self, out);
}
at::Tensor & special_erfc_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_special_erfc_out(self, out);
}
at::Tensor special_erfinv(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__special_erfinv(self);
}
at::Tensor & special_erfinv_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd_out_special_erfinv_out(self, out);
}
at::Tensor & special_erfinv_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_special_erfinv_out(self, out);
}
at::Tensor special_ndtr(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__special_ndtr(self);
}
at::Tensor & special_ndtr_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd_out_special_ndtr_out(self, out);
}
at::Tensor & special_ndtr_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_special_ndtr_out(self, out);
}
at::Tensor special_xlogy(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd__special_xlogy(self, other);
}
at::Tensor & special_xlogy_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_out_special_xlogy_out(self, other, out);
}
at::Tensor & special_xlogy_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_special_xlogy_out(self, other, out);
}
at::Tensor special_xlogy(const at::Scalar & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_self_scalar_special_xlogy(self, other);
}
at::Tensor & special_xlogy_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_self_scalar_out_special_xlogy_out(self, other, out);
}
at::Tensor & special_xlogy_outf(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_self_scalar_out_special_xlogy_out(self, other, out);
}
at::Tensor special_xlogy(const at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_other_scalar_special_xlogy(self, other);
}
at::Tensor & special_xlogy_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_other_scalar_out_special_xlogy_out(self, other, out);
}
at::Tensor & special_xlogy_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_other_scalar_out_special_xlogy_out(self, other, out);
}
at::Tensor special_i0(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__special_i0(self);
}
at::Tensor & special_i0_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd_out_special_i0_out(self, out);
}
at::Tensor & special_i0_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_special_i0_out(self, out);
}
at::Tensor special_logit(const at::Tensor & self, c10::optional<double> eps) {
    return wrapper_CompositeImplicitAutograd__special_logit(self, eps);
}
at::Tensor & special_logit_out(at::Tensor & out, const at::Tensor & self, c10::optional<double> eps) {
    return wrapper_CompositeImplicitAutograd_out_special_logit_out(self, eps, out);
}
at::Tensor & special_logit_outf(const at::Tensor & self, c10::optional<double> eps, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_special_logit_out(self, eps, out);
}
at::Tensor special_polygamma(int64_t n, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__special_polygamma(n, self);
}
at::Tensor & special_polygamma_out(at::Tensor & out, int64_t n, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd_out_special_polygamma_out(n, self, out);
}
at::Tensor & special_polygamma_outf(int64_t n, const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_special_polygamma_out(n, self, out);
}
at::Tensor special_logsumexp(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd__special_logsumexp(self, dim, keepdim);
}
at::Tensor & special_logsumexp_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_out_special_logsumexp_out(self, dim, keepdim, out);
}
at::Tensor & special_logsumexp_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_special_logsumexp_out(self, dim, keepdim, out);
}
at::Tensor special_expit(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__special_expit(self);
}
at::Tensor & special_expit_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd_out_special_expit_out(self, out);
}
at::Tensor & special_expit_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_special_expit_out(self, out);
}
at::Tensor special_sinc(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__special_sinc(self);
}
at::Tensor & special_sinc_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd_out_special_sinc_out(self, out);
}
at::Tensor & special_sinc_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_special_sinc_out(self, out);
}
at::Tensor special_round(const at::Tensor & self, int64_t decimals) {
    return wrapper_CompositeImplicitAutograd__special_round(self, decimals);
}
at::Tensor & special_round_out(at::Tensor & out, const at::Tensor & self, int64_t decimals) {
    return wrapper_CompositeImplicitAutograd_out_special_round_out(self, decimals, out);
}
at::Tensor & special_round_outf(const at::Tensor & self, int64_t decimals, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_special_round_out(self, decimals, out);
}
at::Tensor special_log1p(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__special_log1p(self);
}
at::Tensor & special_log1p_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd_out_special_log1p_out(self, out);
}
at::Tensor & special_log1p_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_special_log1p_out(self, out);
}
at::Tensor special_log_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd__special_log_softmax(self, dim, dtype);
}
at::Tensor special_gammainc(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd__special_gammainc(self, other);
}
at::Tensor & special_gammainc_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_out_special_gammainc_out(self, other, out);
}
at::Tensor & special_gammainc_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_special_gammainc_out(self, other, out);
}
at::Tensor special_gammaincc(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd__special_gammaincc(self, other);
}
at::Tensor & special_gammaincc_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_out_special_gammaincc_out(self, other, out);
}
at::Tensor & special_gammaincc_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_special_gammaincc_out(self, other, out);
}
at::Tensor special_multigammaln(const at::Tensor & self, int64_t p) {
    return wrapper_CompositeImplicitAutograd__special_multigammaln(self, p);
}
at::Tensor & special_multigammaln_out(at::Tensor & out, const at::Tensor & self, int64_t p) {
    return wrapper_CompositeImplicitAutograd_out_special_multigammaln_out(self, p, out);
}
at::Tensor & special_multigammaln_outf(const at::Tensor & self, int64_t p, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_special_multigammaln_out(self, p, out);
}
at::Tensor special_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd__special_softmax(self, dim, dtype);
}
at::Tensor fft_fft(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd__fft_fft(self, n, dim, norm);
}
at::Tensor & fft_fft_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd_out_fft_fft_out(self, n, dim, norm, out);
}
at::Tensor & fft_fft_outf(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_fft_fft_out(self, n, dim, norm, out);
}
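// The 1-D FFT wrappers below all thread through an optional signal length
// `n` (zero-pad or truncate), the transform dimension `dim`, and an
// optional normalization string `norm` ("forward", "backward", or
// "ortho"). A minimal sketch, in comments only (assumes a built ATen):
//
//   at::Tensor t = at::rand({8});
//   at::Tensor f = at::fft_fft(t);                  // n/norm default to nullopt, dim to -1
//   at::Tensor g = at::fft_fft(t, 16, -1, "ortho"); // pad to length 16, orthonormal scaling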
at::Tensor fft_ifft(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd__fft_ifft(self, n, dim, norm);
}
at::Tensor & fft_ifft_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd_out_fft_ifft_out(self, n, dim, norm, out);
}
at::Tensor & fft_ifft_outf(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_fft_ifft_out(self, n, dim, norm, out);
}
at::Tensor fft_rfft(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd__fft_rfft(self, n, dim, norm);
}
at::Tensor & fft_rfft_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd_out_fft_rfft_out(self, n, dim, norm, out);
}
at::Tensor & fft_rfft_outf(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_fft_rfft_out(self, n, dim, norm, out);
}
at::Tensor fft_irfft(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd__fft_irfft(self, n, dim, norm);
}
at::Tensor & fft_irfft_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd_out_fft_irfft_out(self, n, dim, norm, out);
}
at::Tensor & fft_irfft_outf(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_fft_irfft_out(self, n, dim, norm, out);
}
at::Tensor fft_hfft(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd__fft_hfft(self, n, dim, norm);
}
at::Tensor & fft_hfft_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd_out_fft_hfft_out(self, n, dim, norm, out);
}
at::Tensor & fft_hfft_outf(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_fft_hfft_out(self, n, dim, norm, out);
}
at::Tensor fft_ihfft(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd__fft_ihfft(self, n, dim, norm);
}
at::Tensor & fft_ihfft_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd_out_fft_ihfft_out(self, n, dim, norm, out);
}
at::Tensor & fft_ihfft_outf(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_fft_ihfft_out(self, n, dim, norm, out);
}
at::Tensor fft_fft2(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd__fft_fft2(self, s, dim, norm);
}
at::Tensor & fft_fft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd_out_fft_fft2_out(self, s, dim, norm, out);
}
at::Tensor & fft_fft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_fft_fft2_out(self, s, dim, norm, out);
}
at::Tensor fft_ifft2(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd__fft_ifft2(self, s, dim, norm);
}
at::Tensor & fft_ifft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd_out_fft_ifft2_out(self, s, dim, norm, out);
}
at::Tensor & fft_ifft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_fft_ifft2_out(self, s, dim, norm, out);
}
at::Tensor fft_rfft2(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd__fft_rfft2(self, s, dim, norm);
}
at::Tensor & fft_rfft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd_out_fft_rfft2_out(self, s, dim, norm, out);
}
at::Tensor & fft_rfft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_fft_rfft2_out(self, s, dim, norm, out);
}
at::Tensor fft_irfft2(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd__fft_irfft2(self, s, dim, norm);
}
at::Tensor & fft_irfft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd_out_fft_irfft2_out(self, s, dim, norm, out);
}
at::Tensor & fft_irfft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_fft_irfft2_out(self, s, dim, norm, out);
}
at::Tensor fft_hfft2(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd__fft_hfft2(self, s, dim, norm);
}
const at::Tensor & fft_hfft2_out(const at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd_out_fft_hfft2_out(self, s, dim, norm, out);
}
const at::Tensor & fft_hfft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_fft_hfft2_out(self, s, dim, norm, out);
}
at::Tensor fft_ihfft2(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd__fft_ihfft2(self, s, dim, norm);
}
const at::Tensor & fft_ihfft2_out(const at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd_out_fft_ihfft2_out(self, s, dim, norm, out);
}
const at::Tensor & fft_ihfft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_fft_ihfft2_out(self, s, dim, norm, out);
}
at::Tensor fft_fftn(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd__fft_fftn(self, s, dim, norm);
}
at::Tensor & fft_fftn_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd_out_fft_fftn_out(self, s, dim, norm, out);
}
at::Tensor & fft_fftn_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_fft_fftn_out(self, s, dim, norm, out);
}
at::Tensor fft_ifftn(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd__fft_ifftn(self, s, dim, norm);
}
at::Tensor & fft_ifftn_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd_out_fft_ifftn_out(self, s, dim, norm, out);
}
at::Tensor & fft_ifftn_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_fft_ifftn_out(self, s, dim, norm, out);
}
at::Tensor fft_rfftn(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd__fft_rfftn(self, s, dim, norm);
}
at::Tensor & fft_rfftn_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd_out_fft_rfftn_out(self, s, dim, norm, out);
}
at::Tensor & fft_rfftn_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_fft_rfftn_out(self, s, dim, norm, out);
}
at::Tensor fft_irfftn(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd__fft_irfftn(self, s, dim, norm);
}
at::Tensor & fft_irfftn_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd_out_fft_irfftn_out(self, s, dim, norm, out);
}
at::Tensor & fft_irfftn_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_fft_irfftn_out(self, s, dim, norm, out);
}
at::Tensor fft_hfftn(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd__fft_hfftn(self, s, dim, norm);
}
const at::Tensor & fft_hfftn_out(const at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd_out_fft_hfftn_out(self, s, dim, norm, out);
}
const at::Tensor & fft_hfftn_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_fft_hfftn_out(self, s, dim, norm, out);
}
at::Tensor fft_ihfftn(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd__fft_ihfftn(self, s, dim, norm);
}
const at::Tensor & fft_ihfftn_out(const at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
    return wrapper_CompositeImplicitAutograd_out_fft_ihfftn_out(self, s, dim, norm, out);
}
const at::Tensor & fft_ihfftn_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_fft_ihfftn_out(self, s, dim, norm, out);
}
at::Tensor fft_fftshift(const at::Tensor & self, at::OptionalIntArrayRef dim) {
    return wrapper_CompositeImplicitAutograd__fft_fftshift(self, dim);
}
at::Tensor fft_ifftshift(const at::Tensor & self, at::OptionalIntArrayRef dim) {
    return wrapper_CompositeImplicitAutograd__fft_ifftshift(self, dim);
}
at::Tensor linalg_cholesky(const at::Tensor & self, bool upper) {
    return wrapper_CompositeImplicitAutograd__linalg_cholesky(self, upper);
}
at::Tensor & linalg_cholesky_out(at::Tensor & out, const at::Tensor & self, bool upper) {
    return wrapper_CompositeImplicitAutograd_out_linalg_cholesky_out(self, upper, out);
}
at::Tensor & linalg_cholesky_outf(const at::Tensor & self, bool upper, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_linalg_cholesky_out(self, upper, out);
}
::std::tuple<at::Tensor,at::Tensor> linalg_lu_factor(const at::Tensor & A, bool pivot) {
    return wrapper_CompositeImplicitAutograd__linalg_lu_factor(A, pivot);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_lu_factor_out(at::Tensor & LU, at::Tensor & pivots, const at::Tensor & A, bool pivot) {
    return wrapper_CompositeImplicitAutograd_out_linalg_lu_factor_out(A, pivot, LU, pivots);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_lu_factor_outf(const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots) {
    return wrapper_CompositeImplicitAutograd_out_linalg_lu_factor_out(A, pivot, LU, pivots);
}
at::Tensor linalg_det(const at::Tensor & A) {
    return wrapper_CompositeImplicitAutograd__linalg_det(A);
}
at::Tensor & linalg_det_out(at::Tensor & out, const at::Tensor & A) {
    return wrapper_CompositeImplicitAutograd_out_linalg_det_out(A, out);
}
at::Tensor & linalg_det_outf(const at::Tensor & A, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_linalg_det_out(A, out);
}
at::Tensor det(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__det(self);
}
::std::tuple<at::Tensor,at::Tensor> linalg_ldl_factor(const at::Tensor & self, bool hermitian) {
    return wrapper_CompositeImplicitAutograd__linalg_ldl_factor(self, hermitian);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_ldl_factor_out(at::Tensor & LD, at::Tensor & pivots, const at::Tensor & self, bool hermitian) {
    return wrapper_CompositeImplicitAutograd_out_linalg_ldl_factor_out(self, hermitian, LD, pivots);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_ldl_factor_outf(const at::Tensor & self, bool hermitian, at::Tensor & LD, at::Tensor & pivots) {
    return wrapper_CompositeImplicitAutograd_out_linalg_ldl_factor_out(self, hermitian, LD, pivots);
}
at::Tensor linalg_matmul(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd__linalg_matmul(self, other);
}
at::Tensor & linalg_matmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_out_linalg_matmul_out(self, other, out);
}
at::Tensor & linalg_matmul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_linalg_matmul_out(self, other, out);
}
at::Tensor linalg_vecdot(const at::Tensor & x, const at::Tensor & y, int64_t dim) {
    return wrapper_CompositeImplicitAutograd__linalg_vecdot(x, y, dim);
}
at::Tensor & linalg_vecdot_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & y, int64_t dim) {
    return wrapper_CompositeImplicitAutograd_out_linalg_vecdot_out(x, y, dim, out);
}
at::Tensor & linalg_vecdot_outf(const at::Tensor & x, const at::Tensor & y, int64_t dim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_linalg_vecdot_out(x, y, dim, out);
}
::std::tuple<at::Tensor,at::Tensor> linalg_slogdet(const at::Tensor & A) {
    return wrapper_CompositeImplicitAutograd__linalg_slogdet(A);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_slogdet_out(at::Tensor & sign, at::Tensor & logabsdet, const at::Tensor & A) {
    return wrapper_CompositeImplicitAutograd_out_linalg_slogdet_out(A, sign, logabsdet);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_slogdet_outf(const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet) {
    return wrapper_CompositeImplicitAutograd_out_linalg_slogdet_out(A, sign, logabsdet);
}
::std::tuple<at::Tensor,at::Tensor> slogdet(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__slogdet(self);
}
::std::tuple<at::Tensor &,at::Tensor &> slogdet_out(at::Tensor & sign, at::Tensor & logabsdet, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd_out_slogdet_out(self, sign, logabsdet);
}
::std::tuple<at::Tensor &,at::Tensor &> slogdet_outf(const at::Tensor & self, at::Tensor & sign, at::Tensor & logabsdet) {
    return wrapper_CompositeImplicitAutograd_out_slogdet_out(self, sign, logabsdet);
}
at::Tensor logdet(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__logdet(self);
}
at::Tensor linalg_eigvals(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__linalg_eigvals(self);
}
at::Tensor & linalg_eigvals_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd_out_linalg_eigvals_out(self, out);
}
at::Tensor & linalg_eigvals_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_linalg_eigvals_out(self, out);
}
::std::tuple<at::Tensor,at::Tensor> linalg_eigh(const at::Tensor & self, c10::string_view UPLO) {
    return wrapper_CompositeImplicitAutograd__linalg_eigh(self, UPLO);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_eigh_out(at::Tensor & eigvals, at::Tensor & eigvecs, const at::Tensor & self, c10::string_view UPLO) {
    return wrapper_CompositeImplicitAutograd_eigvals_linalg_eigh_out(self, UPLO, eigvals, eigvecs);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_eigh_outf(const at::Tensor & self, c10::string_view UPLO, at::Tensor & eigvals, at::Tensor & eigvecs) {
    return wrapper_CompositeImplicitAutograd_eigvals_linalg_eigh_out(self, UPLO, eigvals, eigvecs);
}
at::Tensor linalg_eigvalsh(const at::Tensor & self, c10::string_view UPLO) {
    return wrapper_CompositeImplicitAutograd__linalg_eigvalsh(self, UPLO);
}
at::Tensor & linalg_eigvalsh_out(at::Tensor & out, const at::Tensor & self, c10::string_view UPLO) {
    return wrapper_CompositeImplicitAutograd_out_linalg_eigvalsh_out(self, UPLO, out);
}
at::Tensor & linalg_eigvalsh_outf(const at::Tensor & self, c10::string_view UPLO, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_linalg_eigvalsh_out(self, UPLO, out);
}
at::Tensor linalg_inv(const at::Tensor & A) {
    return wrapper_CompositeImplicitAutograd__linalg_inv(A);
}
at::Tensor & linalg_inv_out(at::Tensor & out, const at::Tensor & A) {
    return wrapper_CompositeImplicitAutograd_out_linalg_inv_out(A, out);
}
at::Tensor & linalg_inv_outf(const at::Tensor & A, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_linalg_inv_out(A, out);
}
at::Tensor inverse(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__inverse(self);
}
at::Tensor & inverse_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd_out_inverse_out(self, out);
}
at::Tensor & inverse_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_inverse_out(self, out);
}
at::Tensor inner(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd__inner(self, other);
}
at::Tensor & inner_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_out_inner_out(self, other, out);
}
at::Tensor & inner_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_inner_out(self, other, out);
}
at::Tensor outer(const at::Tensor & self, const at::Tensor & vec2) {
    return wrapper_CompositeImplicitAutograd__outer(self, vec2);
}
at::Tensor & outer_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & vec2) {
    return wrapper_CompositeImplicitAutograd_out_outer_out(self, vec2, out);
}
at::Tensor & outer_outf(const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_outer_out(self, vec2, out);
}
at::Tensor ger(const at::Tensor & self, const at::Tensor & vec2) {
    return wrapper_CompositeImplicitAutograd__ger(self, vec2);
}
at::Tensor & ger_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & vec2) {
    return wrapper_CompositeImplicitAutograd_out_ger_out(self, vec2, out);
}
at::Tensor & ger_outf(const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_ger_out(self, vec2, out);
}
at::Tensor linalg_norm(const at::Tensor & self, const c10::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd__linalg_norm(self, ord, dim, keepdim, dtype);
}
at::Tensor & linalg_norm_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_out_linalg_norm_out(self, ord, dim, keepdim, dtype, out);
}
at::Tensor & linalg_norm_outf(const at::Tensor & self, const c10::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_linalg_norm_out(self, ord, dim, keepdim, dtype, out);
}
at::Tensor linalg_norm(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_ord_str_linalg_norm(self, ord, dim, keepdim, dtype);
}
at::Tensor & linalg_norm_out(at::Tensor & out, const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_ord_str_out_linalg_norm_out(self, ord, dim, keepdim, dtype, out);
}
at::Tensor & linalg_norm_outf(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_ord_str_out_linalg_norm_out(self, ord, dim, keepdim, dtype, out);
}
at::Tensor linalg_matrix_norm(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd__linalg_matrix_norm(self, ord, dim, keepdim, dtype);
}
at::Tensor & linalg_matrix_norm_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_out_linalg_matrix_norm_out(self, ord, dim, keepdim, dtype, out);
}
at::Tensor & linalg_matrix_norm_outf(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_linalg_matrix_norm_out(self, ord, dim, keepdim, dtype, out);
}
at::Tensor linalg_matrix_norm(const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_str_ord_linalg_matrix_norm(self, ord, dim, keepdim, dtype);
}
at::Tensor & linalg_matrix_norm_out(at::Tensor & out, const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_str_ord_out_linalg_matrix_norm_out(self, ord, dim, keepdim, dtype, out);
}
at::Tensor & linalg_matrix_norm_outf(const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_str_ord_out_linalg_matrix_norm_out(self, ord, dim, keepdim, dtype, out);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_svd(const at::Tensor & A, bool full_matrices, c10::optional<c10::string_view> driver) {
    return wrapper_CompositeImplicitAutograd__linalg_svd(A, full_matrices, driver);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_svd_out(at::Tensor & U, at::Tensor & S, at::Tensor & Vh, const at::Tensor & A, bool full_matrices, c10::optional<c10::string_view> driver) {
    return wrapper_CompositeImplicitAutograd_U_linalg_svd_out(A, full_matrices, driver, U, S, Vh);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_svd_outf(const at::Tensor & A, bool full_matrices, c10::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh) {
    return wrapper_CompositeImplicitAutograd_U_linalg_svd_out(A, full_matrices, driver, U, S, Vh);
}
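// Factorizations such as linalg_lu_factor, linalg_slogdet, and linalg_svd
// return several tensors packed in a ::std::tuple; C++17 structured
// bindings unpack them directly. A minimal sketch, in comments only
// (assumes a built ATen; the 3x3 shape is illustrative):
//
//   at::Tensor A = at::rand({3, 3});
//   auto [U, S, Vh] = at::linalg_svd(A, /*full_matrices=*/true);
//   auto [sign, logabsdet] = at::linalg_slogdet(A);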
at::Tensor linalg_svdvals(const at::Tensor & A, c10::optional<c10::string_view> driver) {
    return wrapper_CompositeImplicitAutograd__linalg_svdvals(A, driver);
}
at::Tensor & linalg_svdvals_out(at::Tensor & out, const at::Tensor & A, c10::optional<c10::string_view> driver) {
    return wrapper_CompositeImplicitAutograd_out_linalg_svdvals_out(A, driver, out);
}
at::Tensor & linalg_svdvals_outf(const at::Tensor & A, c10::optional<c10::string_view> driver, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_linalg_svdvals_out(A, driver, out);
}
at::Tensor linalg_cond(const at::Tensor & self, const c10::optional<at::Scalar> & p) {
    return wrapper_CompositeImplicitAutograd__linalg_cond(self, p);
}
at::Tensor & linalg_cond_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p) {
    return wrapper_CompositeImplicitAutograd_out_linalg_cond_out(self, p, out);
}
at::Tensor & linalg_cond_outf(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_linalg_cond_out(self, p, out);
}
at::Tensor linalg_cond(const at::Tensor & self, c10::string_view p) {
    return wrapper_CompositeImplicitAutograd_p_str_linalg_cond(self, p);
}
at::Tensor & linalg_cond_out(at::Tensor & out, const at::Tensor & self, c10::string_view p) {
    return wrapper_CompositeImplicitAutograd_p_str_out_linalg_cond_out(self, p, out);
}
at::Tensor & linalg_cond_outf(const at::Tensor & self, c10::string_view p, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_p_str_out_linalg_cond_out(self, p, out);
}
at::Tensor linalg_pinv(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian) {
    return wrapper_CompositeImplicitAutograd_atol_rtol_float_linalg_pinv(self, atol, rtol, hermitian);
}
at::Tensor & linalg_pinv_out(at::Tensor & out, const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian) {
    return wrapper_CompositeImplicitAutograd_atol_rtol_float_out_linalg_pinv_out(self, atol, rtol, hermitian, out);
}
at::Tensor & linalg_pinv_outf(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_atol_rtol_float_out_linalg_pinv_out(self, atol, rtol, hermitian, out);
}
at::Tensor linalg_pinv(const at::Tensor & self, double rcond, bool hermitian) {
    return wrapper_CompositeImplicitAutograd__linalg_pinv(self, rcond, hermitian);
}
at::Tensor & linalg_pinv_out(at::Tensor & out, const at::Tensor & self, double rcond, bool hermitian) {
    return wrapper_CompositeImplicitAutograd_out_linalg_pinv_out(self, rcond, hermitian, out);
}
at::Tensor & linalg_pinv_outf(const at::Tensor & self, double rcond, bool hermitian, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_linalg_pinv_out(self, rcond, hermitian, out);
}
at::Tensor linalg_pinv(const at::Tensor & self, const at::Tensor & rcond, bool hermitian) {
    return wrapper_CompositeImplicitAutograd_rcond_tensor_linalg_pinv(self, rcond, hermitian);
}
at::Tensor & linalg_pinv_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & rcond, bool hermitian) {
    return wrapper_CompositeImplicitAutograd_out_rcond_tensor_linalg_pinv_out(self, rcond, hermitian, out);
}
at::Tensor & linalg_pinv_outf(const at::Tensor & self, const at::Tensor & rcond, bool hermitian, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_rcond_tensor_linalg_pinv_out(self, rcond, hermitian, out);
}
::std::tuple<at::Tensor,at::Tensor> linalg_solve_ex(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
    return wrapper_CompositeImplicitAutograd__linalg_solve_ex(A, B, left, check_errors);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_solve_ex_out(at::Tensor & result, at::Tensor & info, const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
    return wrapper_CompositeImplicitAutograd_out_linalg_solve_ex_out(A, B, left, check_errors, result, info);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_solve_ex_outf(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & info) {
    return wrapper_CompositeImplicitAutograd_out_linalg_solve_ex_out(A, B, left, check_errors, result, info);
}
at::Tensor linalg_solve(const at::Tensor & A, const at::Tensor & B, bool left) {
    return wrapper_CompositeImplicitAutograd__linalg_solve(A, B, left);
}
at::Tensor & linalg_solve_out(at::Tensor & out, const at::Tensor & A, const at::Tensor & B, bool left) {
    return wrapper_CompositeImplicitAutograd_out_linalg_solve_out(A, B, left, out);
}
at::Tensor & linalg_solve_outf(const at::Tensor & A, const at::Tensor & B, bool left, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_linalg_solve_out(A, B, left, out);
}
at::Tensor linalg_tensorinv(const at::Tensor & self, int64_t ind) {
    return wrapper_CompositeImplicitAutograd__linalg_tensorinv(self, ind);
}
at::Tensor & linalg_tensorinv_out(at::Tensor & out, const at::Tensor & self, int64_t ind) {
    return wrapper_CompositeImplicitAutograd_out_linalg_tensorinv_out(self, ind, out);
}
at::Tensor & linalg_tensorinv_outf(const at::Tensor & self, int64_t ind, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_linalg_tensorinv_out(self, ind, out);
}
at::Tensor linalg_tensorsolve(const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims) {
    return wrapper_CompositeImplicitAutograd__linalg_tensorsolve(self, other, dims);
}
at::Tensor & linalg_tensorsolve_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims) {
    return wrapper_CompositeImplicitAutograd_out_linalg_tensorsolve_out(self, other, dims, out);
}
at::Tensor & linalg_tensorsolve_outf(const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_linalg_tensorsolve_out(self, other, dims, out);
}
at::Tensor linalg_matrix_power(const at::Tensor & self, int64_t n) {
    return wrapper_CompositeImplicitAutograd__linalg_matrix_power(self, n);
}
at::Tensor & linalg_matrix_power_out(at::Tensor & out, const at::Tensor & self, int64_t n) {
    return wrapper_CompositeImplicitAutograd_out_linalg_matrix_power_out(self, n, out);
}
at::Tensor & linalg_matrix_power_outf(const at::Tensor & self, int64_t n, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_linalg_matrix_power_out(self, n, out);
}
at::Tensor linalg_matrix_rank(const at::Tensor & input, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian) {
    return wrapper_CompositeImplicitAutograd_atol_rtol_tensor_linalg_matrix_rank(input, atol, rtol, hermitian);
}
at::Tensor & linalg_matrix_rank_out(at::Tensor & out, const at::Tensor & input, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian) {
    return wrapper_CompositeImplicitAutograd_atol_rtol_tensor_out_linalg_matrix_rank_out(input, atol, rtol, hermitian, out);
}
at::Tensor & linalg_matrix_rank_outf(const at::Tensor & input, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_atol_rtol_tensor_out_linalg_matrix_rank_out(input, atol, rtol, hermitian, out);
}
at::Tensor linalg_matrix_rank(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian) {
    return wrapper_CompositeImplicitAutograd_atol_rtol_float_linalg_matrix_rank(self, atol, rtol, hermitian);
}
at::Tensor & linalg_matrix_rank_out(at::Tensor & out, const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian) {
    return wrapper_CompositeImplicitAutograd_atol_rtol_float_out_linalg_matrix_rank_out(self, atol, rtol, hermitian, out);
}
at::Tensor & linalg_matrix_rank_outf(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_atol_rtol_float_out_linalg_matrix_rank_out(self, atol, rtol, hermitian, out);
}
at::Tensor linalg_matrix_rank(const at::Tensor & self, double tol, bool hermitian) {
    return wrapper_CompositeImplicitAutograd__linalg_matrix_rank(self, tol, hermitian);
}
at::Tensor & linalg_matrix_rank_out(at::Tensor & out, const at::Tensor & self, double tol, bool hermitian) {
    return wrapper_CompositeImplicitAutograd_out_linalg_matrix_rank_out(self, tol, hermitian, out);
}
at::Tensor & linalg_matrix_rank_outf(const at::Tensor & self, double tol, bool hermitian, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_linalg_matrix_rank_out(self, tol, hermitian, out);
}
at::Tensor linalg_matrix_rank(const at::Tensor & input, const at::Tensor & tol, bool hermitian) {
    return wrapper_CompositeImplicitAutograd_tol_tensor_linalg_matrix_rank(input, tol, hermitian);
}
at::Tensor & linalg_matrix_rank_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & tol, bool hermitian) {
    return wrapper_CompositeImplicitAutograd_out_tol_tensor_linalg_matrix_rank_out(input, tol, hermitian, out);
}
at::Tensor & linalg_matrix_rank_outf(const at::Tensor & input, const at::Tensor & tol, bool hermitian, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_tol_tensor_linalg_matrix_rank_out(input, tol, hermitian, out);
}
at::Tensor linalg_multi_dot(at::TensorList tensors) {
    return wrapper_CompositeImplicitAutograd__linalg_multi_dot(tensors);
}
at::Tensor & linalg_multi_dot_out(at::Tensor & out, at::TensorList tensors) {
    return wrapper_CompositeImplicitAutograd_out_linalg_multi_dot_out(tensors, out);
}
at::Tensor & linalg_multi_dot_outf(at::TensorList tensors, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_linalg_multi_dot_out(tensors, out);
}
at::Tensor nested_to_padded_tensor(const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size) {
    return wrapper_CompositeImplicitAutograd__nested_to_padded_tensor(self, padding, output_size);
}
at::Tensor _test_serialization_subcmul(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    return wrapper_CompositeImplicitAutograd___test_serialization_subcmul(self, other, alpha);
}
at::Tensor _test_string_default(const at::Tensor & dummy, c10::string_view a, c10::string_view b) {
    return wrapper_CompositeImplicitAutograd___test_string_default(dummy, a, b);
}
at::Tensor _test_ambiguous_defaults(const at::Tensor & dummy, int64_t a, int64_t b) {
    return wrapper_CompositeImplicitAutograd_a__test_ambiguous_defaults(dummy, a, b);
}
at::Tensor _test_ambiguous_defaults(const at::Tensor & dummy, int64_t a, c10::string_view b) {
    return wrapper_CompositeImplicitAutograd_b__test_ambiguous_defaults(dummy, a, b);
}
at::Tensor _test_autograd_multiple_dispatch(const at::Tensor & self, bool b) {
    return wrapper_CompositeImplicitAutograd_ntonly__test_autograd_multiple_dispatch(self, b);
}
at::Tensor pad_sequence(at::TensorList sequences, bool batch_first, double padding_value) {
    return wrapper_CompositeImplicitAutograd__pad_sequence(sequences, batch_first, padding_value);
}
at::Tensor flatten_dense_tensors(at::TensorList tensors) {
    return wrapper_CompositeImplicitAutograd__flatten_dense_tensors(tensors);
}
::std::vector<at::Tensor> unflatten_dense_tensors(const at::Tensor & flat, at::TensorList tensors) {
    return wrapper_CompositeImplicitAutograd__unflatten_dense_tensors(flat, tensors);
}
at::Tensor scaled_dot_product_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal) {
    return wrapper_CompositeImplicitAutograd__scaled_dot_product_attention(query, key, value, attn_mask, dropout_p, is_causal);
}
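// scaled_dot_product_attention takes query/key/value plus an optional
// attention mask; `is_causal` applies causal masking instead of an
// explicit mask. A hedged sketch, in comments only (the shapes are
// illustrative assumptions, not a requirement of this file):
//
//   at::Tensor q = at::rand({2, 4, 8, 16});  // [batch, heads, seq, head_dim]
//   at::Tensor k = at::rand({2, 4, 8, 16});
//   at::Tensor v = at::rand({2, 4, 8, 16});
//   at::Tensor o = at::scaled_dot_product_attention(
//       q, k, v, /*attn_mask=*/{}, /*dropout_p=*/0.0, /*is_causal=*/true);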
::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool need_attn_weights, bool is_causal) {
    return wrapper_CompositeImplicitAutograd___scaled_dot_product_attention(query, key, value, attn_mask, dropout_p, need_attn_weights, is_causal);
}
::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_attention_math(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal, const c10::optional<at::Tensor> & dropout_mask) {
    return wrapper_CompositeImplicitAutograd___scaled_dot_product_attention_math(query, key, value, attn_mask, dropout_p, is_causal, dropout_mask);
}
at::Tensor special_chebyshev_polynomial_t(const at::Scalar & x, const at::Tensor & n) {
    return wrapper_CompositeImplicitAutograd_x_scalar_special_chebyshev_polynomial_t(x, n);
}
at::Tensor & special_chebyshev_polynomial_t_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
    return wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_t_out(x, n, out);
}
at::Tensor & special_chebyshev_polynomial_t_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_t_out(x, n, out);
}
at::Tensor special_chebyshev_polynomial_t(const at::Tensor & x, const at::Scalar & n) {
    return wrapper_CompositeImplicitAutograd_n_scalar_special_chebyshev_polynomial_t(x, n);
}
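// The orthogonal-polynomial ops below are overloaded on which argument is
// a Scalar: (Scalar x, Tensor n) routes to the `x_scalar` wrapper and
// (Tensor x, Scalar n) to the `n_scalar` wrapper. A minimal sketch, in
// comments only (assumes a built ATen; float dtype chosen for clarity):
//
//   at::Tensor n = at::arange(5, at::kFloat);
//   at::Tensor a = at::special_chebyshev_polynomial_t(0.5, n); // Scalar x overload
//   at::Tensor x = at::rand({5});
//   at::Tensor b = at::special_chebyshev_polynomial_t(x, 3);   // Scalar n overload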
at::Tensor special_chebyshev_polynomial_u(const at::Scalar & x, const at::Tensor & n) {
    return wrapper_CompositeImplicitAutograd_x_scalar_special_chebyshev_polynomial_u(x, n);
}
at::Tensor & special_chebyshev_polynomial_u_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
    return wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_u_out(x, n, out);
}
at::Tensor & special_chebyshev_polynomial_u_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_u_out(x, n, out);
}
at::Tensor special_chebyshev_polynomial_u(const at::Tensor & x, const at::Scalar & n) {
    return wrapper_CompositeImplicitAutograd_n_scalar_special_chebyshev_polynomial_u(x, n);
}
at::Tensor special_chebyshev_polynomial_v(const at::Scalar & x, const at::Tensor & n) {
    return wrapper_CompositeImplicitAutograd_x_scalar_special_chebyshev_polynomial_v(x, n);
}
at::Tensor & special_chebyshev_polynomial_v_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
    return wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_v_out(x, n, out);
}
at::Tensor & special_chebyshev_polynomial_v_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_v_out(x, n, out);
}
at::Tensor special_chebyshev_polynomial_v(const at::Tensor & x, const at::Scalar & n) {
    return wrapper_CompositeImplicitAutograd_n_scalar_special_chebyshev_polynomial_v(x, n);
}
at::Tensor special_chebyshev_polynomial_w(const at::Scalar & x, const at::Tensor & n) {
    return wrapper_CompositeImplicitAutograd_x_scalar_special_chebyshev_polynomial_w(x, n);
}
at::Tensor & special_chebyshev_polynomial_w_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
    return wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_w_out(x, n, out);
}
at::Tensor & special_chebyshev_polynomial_w_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_w_out(x, n, out);
}
at::Tensor special_chebyshev_polynomial_w(const at::Tensor & x, const at::Scalar & n) {
    return wrapper_CompositeImplicitAutograd_n_scalar_special_chebyshev_polynomial_w(x, n);
}
at::Tensor special_hermite_polynomial_h(const at::Scalar & x, const at::Tensor & n) {
    return wrapper_CompositeImplicitAutograd_x_scalar_special_hermite_polynomial_h(x, n);
}
at::Tensor & special_hermite_polynomial_h_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
    return wrapper_CompositeImplicitAutograd_x_scalar_out_special_hermite_polynomial_h_out(x, n, out);
}
at::Tensor & special_hermite_polynomial_h_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_x_scalar_out_special_hermite_polynomial_h_out(x, n, out);
}
at::Tensor special_hermite_polynomial_h(const at::Tensor & x, const at::Scalar & n) {
    return wrapper_CompositeImplicitAutograd_n_scalar_special_hermite_polynomial_h(x, n);
}
at::Tensor special_hermite_polynomial_he(const at::Scalar & x, const at::Tensor & n) {
    return wrapper_CompositeImplicitAutograd_x_scalar_special_hermite_polynomial_he(x, n);
}
at::Tensor & special_hermite_polynomial_he_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
    return wrapper_CompositeImplicitAutograd_x_scalar_out_special_hermite_polynomial_he_out(x, n, out);
}
at::Tensor & special_hermite_polynomial_he_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_x_scalar_out_special_hermite_polynomial_he_out(x, n, out);
}
at::Tensor special_hermite_polynomial_he(const at::Tensor & x, const at::Scalar & n) {
    return wrapper_CompositeImplicitAutograd_n_scalar_special_hermite_polynomial_he(x, n);
}
at::Tensor special_laguerre_polynomial_l(const at::Scalar & x, const at::Tensor & n) {
    return wrapper_CompositeImplicitAutograd_x_scalar_special_laguerre_polynomial_l(x, n);
}
at::Tensor & special_laguerre_polynomial_l_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
    return wrapper_CompositeImplicitAutograd_x_scalar_out_special_laguerre_polynomial_l_out(x, n, out);
}
at::Tensor & special_laguerre_polynomial_l_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_x_scalar_out_special_laguerre_polynomial_l_out(x, n, out);
}
at::Tensor special_laguerre_polynomial_l(const at::Tensor & x, const at::Scalar & n) {
    return wrapper_CompositeImplicitAutograd_n_scalar_special_laguerre_polynomial_l(x, n);
}
at::Tensor special_legendre_polynomial_p(const at::Scalar & x, const at::Tensor & n) {
    return wrapper_CompositeImplicitAutograd_x_scalar_special_legendre_polynomial_p(x, n);
}
at::Tensor & special_legendre_polynomial_p_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
    return wrapper_CompositeImplicitAutograd_x_scalar_out_special_legendre_polynomial_p_out(x, n, out);
}
at::Tensor & special_legendre_polynomial_p_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_x_scalar_out_special_legendre_polynomial_p_out(x, n, out);
}
at::Tensor special_legendre_polynomial_p(const at::Tensor & x, const at::Scalar & n) {
    return wrapper_CompositeImplicitAutograd_n_scalar_special_legendre_polynomial_p(x, n);
}
at::Tensor special_shifted_chebyshev_polynomial_t(const at::Scalar & x, const at::Tensor & n) {
    return wrapper_CompositeImplicitAutograd_x_scalar_special_shifted_chebyshev_polynomial_t(x, n);
}
at::Tensor & special_shifted_chebyshev_polynomial_t_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
    return wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_t_out(x, n, out);
}
at::Tensor & special_shifted_chebyshev_polynomial_t_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_t_out(x, n, out);
}
at::Tensor special_shifted_chebyshev_polynomial_t(const at::Tensor & x, const at::Scalar & n) {
    return wrapper_CompositeImplicitAutograd_n_scalar_special_shifted_chebyshev_polynomial_t(x, n);
}
at::Tensor special_shifted_chebyshev_polynomial_u(const at::Scalar & x, const at::Tensor & n) {
    return wrapper_CompositeImplicitAutograd_x_scalar_special_shifted_chebyshev_polynomial_u(x, n);
}
at::Tensor & special_shifted_chebyshev_polynomial_u_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
    return wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_u_out(x, n, out);
}
at::Tensor & special_shifted_chebyshev_polynomial_u_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_u_out(x, n, out);
}
at::Tensor special_shifted_chebyshev_polynomial_u(const at::Tensor & x, const at::Scalar & n) {
    return wrapper_CompositeImplicitAutograd_n_scalar_special_shifted_chebyshev_polynomial_u(x, n);
}
at::Tensor special_shifted_chebyshev_polynomial_v(const at::Scalar & x, const at::Tensor & n) {
    return wrapper_CompositeImplicitAutograd_x_scalar_special_shifted_chebyshev_polynomial_v(x, n);
}
at::Tensor & special_shifted_chebyshev_polynomial_v_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
    return wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_v_out(x, n, out);
}
at::Tensor & special_shifted_chebyshev_polynomial_v_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_v_out(x, n, out);
}
at::Tensor special_shifted_chebyshev_polynomial_v(const at::Tensor & x, const at::Scalar & n) {
    return wrapper_CompositeImplicitAutograd_n_scalar_special_shifted_chebyshev_polynomial_v(x, n);
}
at::Tensor special_shifted_chebyshev_polynomial_w(const at::Scalar & x, const at::Tensor & n) {
    return wrapper_CompositeImplicitAutograd_x_scalar_special_shifted_chebyshev_polynomial_w(x, n);
}
at::Tensor & special_shifted_chebyshev_polynomial_w_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
    return wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_w_out(x, n, out);
}
at::Tensor & special_shifted_chebyshev_polynomial_w_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_w_out(x, n, out);
}
at::Tensor special_shifted_chebyshev_polynomial_w(const at::Tensor & x, const at::Scalar & n) {
    return wrapper_CompositeImplicitAutograd_n_scalar_special_shifted_chebyshev_polynomial_w(x, n);
}
} // namespace compositeimplicitautograd
} // namespace at