1 | // required for old g++ to compile PRId64 macros, see |
2 | // https://github.com/pytorch/pytorch/issues/3571 |
3 | // for context |
4 | #ifndef __STDC_FORMAT_MACROS |
5 | #define __STDC_FORMAT_MACROS |
6 | #endif |
7 | |
8 | // an external backend might generate file within its code tree |
9 | // and check all the source files within the tree with clang-format. |
10 | // so, disable it since the backend might have a different config. |
11 | // clang-format off |
12 | |
13 | // NOTE: This condition is true for all PyTorch internal libraries, it |
14 | // just excludes external projects such as torch_xla which |
15 | // re-use some of the PyTorch codegen machinery. |
16 | #if defined(CAFFE2_BUILD_MAIN_LIB) || \ |
17 | defined(TORCH_CUDA_BUILD_MAIN_LIB) || \ |
18 | defined(TORCH_HIP_BUILD_MAIN_LIB) || \ |
19 | defined(TORCH_CUDA_CU_BUILD_MAIN_LIB) || \ |
20 | defined(TORCH_CUDA_CPP_BUILD_MAIN_LIB) |
21 | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS |
22 | #endif |
23 | |
24 | // @generated by torchgen/gen.py from RegisterDispatchKey.cpp |
25 | |
26 | #include <c10/core/TensorImpl.h> |
27 | #include <c10/core/Allocator.h> |
28 | #include <ATen/DeviceGuard.h> |
29 | #include <ATen/NamedTensorUtils.h> |
30 | #include <ATen/Utils.h> |
31 | #include <ATen/WrapDimUtils.h> |
32 | #include <ATen/Dispatch.h> |
33 | #include <c10/util/ExclusivelyOwned.h> |
34 | #include <c10/util/Half.h> |
35 | #include <c10/core/UndefinedTensorImpl.h> |
36 | #include <c10/util/Optional.h> |
37 | #include <ATen/Tensor.h> |
38 | #include <ATen/native/Resize.h> |
39 | |
40 | #include <cstddef> |
41 | #include <functional> |
42 | #include <memory> |
43 | #include <utility> |
44 | |
45 | #include <ATen/Config.h> |
46 | #include <ATen/core/op_registration/adaption.h> |
47 | #include <torch/library.h> |
48 | |
49 | |
50 | #include <ATen/ops/as_strided_native.h> |
51 | #include <ATen/ops/empty.h> |
52 | #include <ATen/ops/empty_strided.h> |
53 | #include <ATen/ops/_copy_from_and_resize.h> |
54 | #include <ATen/ops/_copy_from.h> |
55 | #include <ATen/ops/_add_batch_dim_compositeimplicitautograd_dispatch.h> |
56 | #include <ATen/ops/_add_batch_dim_native.h> |
57 | #include <ATen/ops/_assert_tensor_metadata_compositeimplicitautograd_dispatch.h> |
58 | #include <ATen/ops/_assert_tensor_metadata_native.h> |
59 | #include <ATen/ops/_autocast_to_full_precision_compositeimplicitautograd_dispatch.h> |
60 | #include <ATen/ops/_autocast_to_full_precision_native.h> |
61 | #include <ATen/ops/_autocast_to_reduced_precision_compositeimplicitautograd_dispatch.h> |
62 | #include <ATen/ops/_autocast_to_reduced_precision_native.h> |
63 | #include <ATen/ops/_backward_compositeimplicitautograd_dispatch.h> |
64 | #include <ATen/ops/_backward_native.h> |
65 | #include <ATen/ops/_batch_norm_impl_index_backward_compositeimplicitautograd_dispatch.h> |
66 | #include <ATen/ops/_batch_norm_impl_index_backward_native.h> |
67 | #include <ATen/ops/_batch_norm_impl_index_compositeimplicitautograd_dispatch.h> |
68 | #include <ATen/ops/_batch_norm_impl_index_native.h> |
69 | #include <ATen/ops/_cast_Byte_compositeimplicitautograd_dispatch.h> |
70 | #include <ATen/ops/_cast_Byte_native.h> |
71 | #include <ATen/ops/_cast_Char_compositeimplicitautograd_dispatch.h> |
72 | #include <ATen/ops/_cast_Char_native.h> |
73 | #include <ATen/ops/_cast_Double_compositeimplicitautograd_dispatch.h> |
74 | #include <ATen/ops/_cast_Double_native.h> |
75 | #include <ATen/ops/_cast_Float_compositeimplicitautograd_dispatch.h> |
76 | #include <ATen/ops/_cast_Float_native.h> |
77 | #include <ATen/ops/_cast_Half_compositeimplicitautograd_dispatch.h> |
78 | #include <ATen/ops/_cast_Half_native.h> |
79 | #include <ATen/ops/_cast_Int_compositeimplicitautograd_dispatch.h> |
80 | #include <ATen/ops/_cast_Int_native.h> |
81 | #include <ATen/ops/_cast_Long_compositeimplicitautograd_dispatch.h> |
82 | #include <ATen/ops/_cast_Long_native.h> |
83 | #include <ATen/ops/_cast_Short_compositeimplicitautograd_dispatch.h> |
84 | #include <ATen/ops/_cast_Short_native.h> |
85 | #include <ATen/ops/_choose_qparams_per_tensor_compositeimplicitautograd_dispatch.h> |
86 | #include <ATen/ops/_choose_qparams_per_tensor_native.h> |
87 | #include <ATen/ops/_convolution_compositeimplicitautograd_dispatch.h> |
88 | #include <ATen/ops/_convolution_double_backward_compositeimplicitautograd_dispatch.h> |
89 | #include <ATen/ops/_convolution_double_backward_native.h> |
90 | #include <ATen/ops/_convolution_mode_compositeimplicitautograd_dispatch.h> |
91 | #include <ATen/ops/_convolution_mode_native.h> |
92 | #include <ATen/ops/_convolution_native.h> |
93 | #include <ATen/ops/_cufft_clear_plan_cache_compositeimplicitautograd_dispatch.h> |
94 | #include <ATen/ops/_cufft_clear_plan_cache_native.h> |
95 | #include <ATen/ops/_cufft_get_plan_cache_max_size_compositeimplicitautograd_dispatch.h> |
96 | #include <ATen/ops/_cufft_get_plan_cache_max_size_native.h> |
97 | #include <ATen/ops/_cufft_get_plan_cache_size_compositeimplicitautograd_dispatch.h> |
98 | #include <ATen/ops/_cufft_get_plan_cache_size_native.h> |
99 | #include <ATen/ops/_cufft_set_plan_cache_max_size_compositeimplicitautograd_dispatch.h> |
100 | #include <ATen/ops/_cufft_set_plan_cache_max_size_native.h> |
101 | #include <ATen/ops/_debug_has_internal_overlap_compositeimplicitautograd_dispatch.h> |
102 | #include <ATen/ops/_debug_has_internal_overlap_native.h> |
103 | #include <ATen/ops/_dim_arange_compositeimplicitautograd_dispatch.h> |
104 | #include <ATen/ops/_dim_arange_native.h> |
105 | #include <ATen/ops/_embedding_bag_backward_compositeimplicitautograd_dispatch.h> |
106 | #include <ATen/ops/_embedding_bag_backward_native.h> |
107 | #include <ATen/ops/_embedding_bag_sparse_backward_compositeimplicitautograd_dispatch.h> |
108 | #include <ATen/ops/_embedding_bag_sparse_backward_native.h> |
109 | #include <ATen/ops/_gather_sparse_backward_compositeimplicitautograd_dispatch.h> |
110 | #include <ATen/ops/_gather_sparse_backward_native.h> |
111 | #include <ATen/ops/_grid_sampler_2d_cpu_fallback_backward_compositeimplicitautograd_dispatch.h> |
112 | #include <ATen/ops/_grid_sampler_2d_cpu_fallback_backward_native.h> |
113 | #include <ATen/ops/_has_compatible_shallow_copy_type_compositeimplicitautograd_dispatch.h> |
114 | #include <ATen/ops/_has_compatible_shallow_copy_type_native.h> |
115 | #include <ATen/ops/_is_zerotensor_compositeimplicitautograd_dispatch.h> |
116 | #include <ATen/ops/_is_zerotensor_native.h> |
117 | #include <ATen/ops/_lu_with_info_compositeimplicitautograd_dispatch.h> |
118 | #include <ATen/ops/_lu_with_info_native.h> |
119 | #include <ATen/ops/_nnpack_available_compositeimplicitautograd_dispatch.h> |
120 | #include <ATen/ops/_nnpack_available_native.h> |
121 | #include <ATen/ops/_pack_padded_sequence_backward_compositeimplicitautograd_dispatch.h> |
122 | #include <ATen/ops/_pack_padded_sequence_backward_native.h> |
123 | #include <ATen/ops/_pad_circular_compositeimplicitautograd_dispatch.h> |
124 | #include <ATen/ops/_pad_circular_native.h> |
125 | #include <ATen/ops/_pad_enum_compositeimplicitautograd_dispatch.h> |
126 | #include <ATen/ops/_pad_enum_native.h> |
127 | #include <ATen/ops/_pad_packed_sequence_compositeimplicitautograd_dispatch.h> |
128 | #include <ATen/ops/_pad_packed_sequence_native.h> |
129 | #include <ATen/ops/_remove_batch_dim_compositeimplicitautograd_dispatch.h> |
130 | #include <ATen/ops/_remove_batch_dim_native.h> |
131 | #include <ATen/ops/_reshape_from_tensor_compositeimplicitautograd_dispatch.h> |
132 | #include <ATen/ops/_reshape_from_tensor_native.h> |
133 | #include <ATen/ops/_rowwise_prune_compositeimplicitautograd_dispatch.h> |
134 | #include <ATen/ops/_rowwise_prune_native.h> |
135 | #include <ATen/ops/_saturate_weight_to_fp16_compositeimplicitautograd_dispatch.h> |
136 | #include <ATen/ops/_saturate_weight_to_fp16_native.h> |
137 | #include <ATen/ops/_scaled_dot_product_attention_compositeimplicitautograd_dispatch.h> |
138 | #include <ATen/ops/_scaled_dot_product_attention_math_compositeimplicitautograd_dispatch.h> |
139 | #include <ATen/ops/_scaled_dot_product_attention_math_native.h> |
140 | #include <ATen/ops/_scaled_dot_product_attention_native.h> |
141 | #include <ATen/ops/_shape_as_tensor_compositeimplicitautograd_dispatch.h> |
142 | #include <ATen/ops/_shape_as_tensor_native.h> |
143 | #include <ATen/ops/_sobol_engine_draw_compositeimplicitautograd_dispatch.h> |
144 | #include <ATen/ops/_sobol_engine_draw_native.h> |
145 | #include <ATen/ops/_sobol_engine_ff_compositeimplicitautograd_dispatch.h> |
146 | #include <ATen/ops/_sobol_engine_ff_native.h> |
147 | #include <ATen/ops/_sobol_engine_initialize_state_compositeimplicitautograd_dispatch.h> |
148 | #include <ATen/ops/_sobol_engine_initialize_state_native.h> |
149 | #include <ATen/ops/_sobol_engine_scramble_compositeimplicitautograd_dispatch.h> |
150 | #include <ATen/ops/_sobol_engine_scramble_native.h> |
151 | #include <ATen/ops/_sparse_bsc_tensor_unsafe_compositeimplicitautograd_dispatch.h> |
152 | #include <ATen/ops/_sparse_bsc_tensor_unsafe_native.h> |
153 | #include <ATen/ops/_sparse_bsr_tensor_unsafe_compositeimplicitautograd_dispatch.h> |
154 | #include <ATen/ops/_sparse_bsr_tensor_unsafe_native.h> |
155 | #include <ATen/ops/_sparse_compressed_tensor_unsafe_compositeimplicitautograd_dispatch.h> |
156 | #include <ATen/ops/_sparse_compressed_tensor_unsafe_native.h> |
157 | #include <ATen/ops/_sparse_coo_tensor_unsafe_compositeimplicitautograd_dispatch.h> |
158 | #include <ATen/ops/_sparse_coo_tensor_unsafe_native.h> |
159 | #include <ATen/ops/_sparse_csc_tensor_unsafe_compositeimplicitautograd_dispatch.h> |
160 | #include <ATen/ops/_sparse_csc_tensor_unsafe_native.h> |
161 | #include <ATen/ops/_sparse_csr_tensor_unsafe_compositeimplicitautograd_dispatch.h> |
162 | #include <ATen/ops/_sparse_csr_tensor_unsafe_native.h> |
163 | #include <ATen/ops/_sparse_log_softmax_compositeimplicitautograd_dispatch.h> |
164 | #include <ATen/ops/_sparse_log_softmax_native.h> |
165 | #include <ATen/ops/_sparse_mm_compositeimplicitautograd_dispatch.h> |
166 | #include <ATen/ops/_sparse_mm_native.h> |
167 | #include <ATen/ops/_sparse_softmax_compositeimplicitautograd_dispatch.h> |
168 | #include <ATen/ops/_sparse_softmax_native.h> |
169 | #include <ATen/ops/_sparse_sum_compositeimplicitautograd_dispatch.h> |
170 | #include <ATen/ops/_sparse_sum_native.h> |
171 | #include <ATen/ops/_test_ambiguous_defaults_compositeimplicitautograd_dispatch.h> |
172 | #include <ATen/ops/_test_ambiguous_defaults_native.h> |
173 | #include <ATen/ops/_test_autograd_multiple_dispatch_compositeimplicitautograd_dispatch.h> |
174 | #include <ATen/ops/_test_autograd_multiple_dispatch_native.h> |
175 | #include <ATen/ops/_test_check_tensor_compositeimplicitautograd_dispatch.h> |
176 | #include <ATen/ops/_test_check_tensor_native.h> |
177 | #include <ATen/ops/_test_serialization_subcmul_compositeimplicitautograd_dispatch.h> |
178 | #include <ATen/ops/_test_serialization_subcmul_native.h> |
179 | #include <ATen/ops/_test_string_default_compositeimplicitautograd_dispatch.h> |
180 | #include <ATen/ops/_test_string_default_native.h> |
181 | #include <ATen/ops/_thnn_differentiable_gru_cell_backward_compositeimplicitautograd_dispatch.h> |
182 | #include <ATen/ops/_thnn_differentiable_gru_cell_backward_native.h> |
183 | #include <ATen/ops/_thnn_differentiable_lstm_cell_backward_compositeimplicitautograd_dispatch.h> |
184 | #include <ATen/ops/_thnn_differentiable_lstm_cell_backward_native.h> |
185 | #include <ATen/ops/_thnn_fused_lstm_cell_backward_compositeimplicitautograd_dispatch.h> |
186 | #include <ATen/ops/_thnn_fused_lstm_cell_backward_native.h> |
187 | #include <ATen/ops/_to_cpu_compositeimplicitautograd_dispatch.h> |
188 | #include <ATen/ops/_to_cpu_native.h> |
189 | #include <ATen/ops/_unpack_dual_compositeimplicitautograd_dispatch.h> |
190 | #include <ATen/ops/_unpack_dual_native.h> |
191 | #include <ATen/ops/_upsample_bicubic2d_aa_compositeimplicitautograd_dispatch.h> |
192 | #include <ATen/ops/_upsample_bicubic2d_aa_native.h> |
193 | #include <ATen/ops/_upsample_bilinear2d_aa_compositeimplicitautograd_dispatch.h> |
194 | #include <ATen/ops/_upsample_bilinear2d_aa_native.h> |
195 | #include <ATen/ops/_upsample_nearest_exact1d_compositeimplicitautograd_dispatch.h> |
196 | #include <ATen/ops/_upsample_nearest_exact1d_native.h> |
197 | #include <ATen/ops/_upsample_nearest_exact2d_compositeimplicitautograd_dispatch.h> |
198 | #include <ATen/ops/_upsample_nearest_exact2d_native.h> |
199 | #include <ATen/ops/_upsample_nearest_exact3d_compositeimplicitautograd_dispatch.h> |
200 | #include <ATen/ops/_upsample_nearest_exact3d_native.h> |
201 | #include <ATen/ops/_use_cudnn_rnn_flatten_weight_compositeimplicitautograd_dispatch.h> |
202 | #include <ATen/ops/_use_cudnn_rnn_flatten_weight_native.h> |
203 | #include <ATen/ops/_validate_sparse_bsc_tensor_args_compositeimplicitautograd_dispatch.h> |
204 | #include <ATen/ops/_validate_sparse_bsc_tensor_args_native.h> |
205 | #include <ATen/ops/_validate_sparse_bsr_tensor_args_compositeimplicitautograd_dispatch.h> |
206 | #include <ATen/ops/_validate_sparse_bsr_tensor_args_native.h> |
207 | #include <ATen/ops/_validate_sparse_compressed_tensor_args_compositeimplicitautograd_dispatch.h> |
208 | #include <ATen/ops/_validate_sparse_compressed_tensor_args_native.h> |
209 | #include <ATen/ops/_validate_sparse_coo_tensor_args_compositeimplicitautograd_dispatch.h> |
210 | #include <ATen/ops/_validate_sparse_coo_tensor_args_native.h> |
211 | #include <ATen/ops/_validate_sparse_csc_tensor_args_compositeimplicitautograd_dispatch.h> |
212 | #include <ATen/ops/_validate_sparse_csc_tensor_args_native.h> |
213 | #include <ATen/ops/_validate_sparse_csr_tensor_args_compositeimplicitautograd_dispatch.h> |
214 | #include <ATen/ops/_validate_sparse_csr_tensor_args_native.h> |
215 | #include <ATen/ops/_version_compositeimplicitautograd_dispatch.h> |
216 | #include <ATen/ops/_version_native.h> |
217 | #include <ATen/ops/_weight_norm_compositeimplicitautograd_dispatch.h> |
218 | #include <ATen/ops/_weight_norm_differentiable_backward_compositeimplicitautograd_dispatch.h> |
219 | #include <ATen/ops/_weight_norm_differentiable_backward_native.h> |
220 | #include <ATen/ops/_weight_norm_native.h> |
221 | #include <ATen/ops/absolute_compositeimplicitautograd_dispatch.h> |
222 | #include <ATen/ops/absolute_native.h> |
223 | #include <ATen/ops/adaptive_avg_pool1d_compositeimplicitautograd_dispatch.h> |
224 | #include <ATen/ops/adaptive_avg_pool1d_native.h> |
225 | #include <ATen/ops/adaptive_avg_pool2d_compositeimplicitautograd_dispatch.h> |
226 | #include <ATen/ops/adaptive_avg_pool2d_native.h> |
227 | #include <ATen/ops/adaptive_avg_pool3d_compositeimplicitautograd_dispatch.h> |
228 | #include <ATen/ops/adaptive_avg_pool3d_native.h> |
229 | #include <ATen/ops/adaptive_max_pool1d_compositeimplicitautograd_dispatch.h> |
230 | #include <ATen/ops/adaptive_max_pool1d_native.h> |
231 | #include <ATen/ops/adjoint_compositeimplicitautograd_dispatch.h> |
232 | #include <ATen/ops/adjoint_native.h> |
233 | #include <ATen/ops/affine_grid_generator_backward_compositeimplicitautograd_dispatch.h> |
234 | #include <ATen/ops/affine_grid_generator_backward_native.h> |
235 | #include <ATen/ops/align_as_compositeimplicitautograd_dispatch.h> |
236 | #include <ATen/ops/align_as_native.h> |
237 | #include <ATen/ops/align_tensors_compositeimplicitautograd_dispatch.h> |
238 | #include <ATen/ops/align_tensors_native.h> |
239 | #include <ATen/ops/align_to_compositeimplicitautograd_dispatch.h> |
240 | #include <ATen/ops/align_to_native.h> |
241 | #include <ATen/ops/all_compositeimplicitautograd_dispatch.h> |
242 | #include <ATen/ops/all_native.h> |
243 | #include <ATen/ops/alpha_dropout_compositeimplicitautograd_dispatch.h> |
244 | #include <ATen/ops/alpha_dropout_native.h> |
245 | #include <ATen/ops/and_compositeimplicitautograd_dispatch.h> |
246 | #include <ATen/ops/and_native.h> |
247 | #include <ATen/ops/any_compositeimplicitautograd_dispatch.h> |
248 | #include <ATen/ops/any_native.h> |
249 | #include <ATen/ops/arccos_compositeimplicitautograd_dispatch.h> |
250 | #include <ATen/ops/arccos_native.h> |
251 | #include <ATen/ops/arccosh_compositeimplicitautograd_dispatch.h> |
252 | #include <ATen/ops/arccosh_native.h> |
253 | #include <ATen/ops/arcsin_compositeimplicitautograd_dispatch.h> |
254 | #include <ATen/ops/arcsin_native.h> |
255 | #include <ATen/ops/arcsinh_compositeimplicitautograd_dispatch.h> |
256 | #include <ATen/ops/arcsinh_native.h> |
257 | #include <ATen/ops/arctan2_compositeimplicitautograd_dispatch.h> |
258 | #include <ATen/ops/arctan2_native.h> |
259 | #include <ATen/ops/arctan_compositeimplicitautograd_dispatch.h> |
260 | #include <ATen/ops/arctan_native.h> |
261 | #include <ATen/ops/arctanh_compositeimplicitautograd_dispatch.h> |
262 | #include <ATen/ops/arctanh_native.h> |
263 | #include <ATen/ops/argsort_compositeimplicitautograd_dispatch.h> |
264 | #include <ATen/ops/argsort_native.h> |
265 | #include <ATen/ops/argwhere_compositeimplicitautograd_dispatch.h> |
266 | #include <ATen/ops/argwhere_native.h> |
267 | #include <ATen/ops/atleast_1d_compositeimplicitautograd_dispatch.h> |
268 | #include <ATen/ops/atleast_1d_native.h> |
269 | #include <ATen/ops/atleast_2d_compositeimplicitautograd_dispatch.h> |
270 | #include <ATen/ops/atleast_2d_native.h> |
271 | #include <ATen/ops/atleast_3d_compositeimplicitautograd_dispatch.h> |
272 | #include <ATen/ops/atleast_3d_native.h> |
273 | #include <ATen/ops/avg_pool1d_compositeimplicitautograd_dispatch.h> |
274 | #include <ATen/ops/avg_pool1d_native.h> |
275 | #include <ATen/ops/batch_norm_compositeimplicitautograd_dispatch.h> |
276 | #include <ATen/ops/batch_norm_native.h> |
277 | #include <ATen/ops/bilinear_compositeimplicitautograd_dispatch.h> |
278 | #include <ATen/ops/bilinear_native.h> |
279 | #include <ATen/ops/bitwise_and_compositeimplicitautograd_dispatch.h> |
280 | #include <ATen/ops/bitwise_and_native.h> |
281 | #include <ATen/ops/bitwise_or_compositeimplicitautograd_dispatch.h> |
282 | #include <ATen/ops/bitwise_or_native.h> |
283 | #include <ATen/ops/bitwise_xor_compositeimplicitautograd_dispatch.h> |
284 | #include <ATen/ops/bitwise_xor_native.h> |
285 | #include <ATen/ops/broadcast_tensors_compositeimplicitautograd_dispatch.h> |
286 | #include <ATen/ops/broadcast_tensors_native.h> |
287 | #include <ATen/ops/broadcast_to_compositeimplicitautograd_dispatch.h> |
288 | #include <ATen/ops/broadcast_to_native.h> |
289 | #include <ATen/ops/can_cast_compositeimplicitautograd_dispatch.h> |
290 | #include <ATen/ops/can_cast_native.h> |
291 | #include <ATen/ops/cartesian_prod_compositeimplicitautograd_dispatch.h> |
292 | #include <ATen/ops/cartesian_prod_native.h> |
293 | #include <ATen/ops/cat_compositeimplicitautograd_dispatch.h> |
294 | #include <ATen/ops/cat_native.h> |
295 | #include <ATen/ops/cdist_compositeimplicitautograd_dispatch.h> |
296 | #include <ATen/ops/cdist_native.h> |
297 | #include <ATen/ops/chain_matmul_compositeimplicitautograd_dispatch.h> |
298 | #include <ATen/ops/chain_matmul_native.h> |
299 | #include <ATen/ops/chalf_compositeimplicitautograd_dispatch.h> |
300 | #include <ATen/ops/chalf_native.h> |
301 | #include <ATen/ops/choose_qparams_optimized_compositeimplicitautograd_dispatch.h> |
302 | #include <ATen/ops/choose_qparams_optimized_native.h> |
303 | #include <ATen/ops/chunk_compositeimplicitautograd_dispatch.h> |
304 | #include <ATen/ops/chunk_native.h> |
305 | #include <ATen/ops/clip_compositeimplicitautograd_dispatch.h> |
306 | #include <ATen/ops/clip_native.h> |
307 | #include <ATen/ops/coalesce_compositeimplicitautograd_dispatch.h> |
308 | #include <ATen/ops/coalesce_native.h> |
309 | #include <ATen/ops/column_stack_compositeimplicitautograd_dispatch.h> |
310 | #include <ATen/ops/column_stack_native.h> |
311 | #include <ATen/ops/combinations_compositeimplicitautograd_dispatch.h> |
312 | #include <ATen/ops/combinations_native.h> |
313 | #include <ATen/ops/concat_compositeimplicitautograd_dispatch.h> |
314 | #include <ATen/ops/concat_native.h> |
315 | #include <ATen/ops/concatenate_compositeimplicitautograd_dispatch.h> |
316 | #include <ATen/ops/concatenate_native.h> |
317 | #include <ATen/ops/conj_compositeimplicitautograd_dispatch.h> |
318 | #include <ATen/ops/conj_native.h> |
319 | #include <ATen/ops/conj_physical_compositeimplicitautograd_dispatch.h> |
320 | #include <ATen/ops/conj_physical_native.h> |
321 | #include <ATen/ops/contiguous_compositeimplicitautograd_dispatch.h> |
322 | #include <ATen/ops/contiguous_native.h> |
323 | #include <ATen/ops/conv1d_compositeimplicitautograd_dispatch.h> |
324 | #include <ATen/ops/conv1d_native.h> |
325 | #include <ATen/ops/conv2d_compositeimplicitautograd_dispatch.h> |
326 | #include <ATen/ops/conv2d_native.h> |
327 | #include <ATen/ops/conv3d_compositeimplicitautograd_dispatch.h> |
328 | #include <ATen/ops/conv3d_native.h> |
329 | #include <ATen/ops/conv_tbc_backward_compositeimplicitautograd_dispatch.h> |
330 | #include <ATen/ops/conv_tbc_backward_native.h> |
331 | #include <ATen/ops/conv_transpose1d_compositeimplicitautograd_dispatch.h> |
332 | #include <ATen/ops/conv_transpose1d_native.h> |
333 | #include <ATen/ops/conv_transpose2d_compositeimplicitautograd_dispatch.h> |
334 | #include <ATen/ops/conv_transpose2d_native.h> |
335 | #include <ATen/ops/conv_transpose3d_compositeimplicitautograd_dispatch.h> |
336 | #include <ATen/ops/conv_transpose3d_native.h> |
337 | #include <ATen/ops/corrcoef_compositeimplicitautograd_dispatch.h> |
338 | #include <ATen/ops/corrcoef_native.h> |
339 | #include <ATen/ops/cosine_embedding_loss_compositeimplicitautograd_dispatch.h> |
340 | #include <ATen/ops/cosine_embedding_loss_native.h> |
341 | #include <ATen/ops/cosine_similarity_compositeimplicitautograd_dispatch.h> |
342 | #include <ATen/ops/cosine_similarity_native.h> |
343 | #include <ATen/ops/cov_compositeimplicitautograd_dispatch.h> |
344 | #include <ATen/ops/cov_native.h> |
345 | #include <ATen/ops/cross_compositeimplicitautograd_dispatch.h> |
346 | #include <ATen/ops/cross_entropy_loss_compositeimplicitautograd_dispatch.h> |
347 | #include <ATen/ops/cross_entropy_loss_native.h> |
348 | #include <ATen/ops/cross_native.h> |
349 | #include <ATen/ops/ctc_loss_compositeimplicitautograd_dispatch.h> |
350 | #include <ATen/ops/ctc_loss_native.h> |
351 | #include <ATen/ops/cudnn_is_acceptable_compositeimplicitautograd_dispatch.h> |
352 | #include <ATen/ops/cudnn_is_acceptable_native.h> |
353 | #include <ATen/ops/cummax_compositeimplicitautograd_dispatch.h> |
354 | #include <ATen/ops/cummax_native.h> |
355 | #include <ATen/ops/cummaxmin_backward_compositeimplicitautograd_dispatch.h> |
356 | #include <ATen/ops/cummaxmin_backward_native.h> |
357 | #include <ATen/ops/cummin_compositeimplicitautograd_dispatch.h> |
358 | #include <ATen/ops/cummin_native.h> |
359 | #include <ATen/ops/cumprod_backward_compositeimplicitautograd_dispatch.h> |
360 | #include <ATen/ops/cumprod_backward_native.h> |
361 | #include <ATen/ops/cumprod_compositeimplicitautograd_dispatch.h> |
362 | #include <ATen/ops/cumprod_native.h> |
363 | #include <ATen/ops/cumsum_compositeimplicitautograd_dispatch.h> |
364 | #include <ATen/ops/cumsum_native.h> |
365 | #include <ATen/ops/cumulative_trapezoid_compositeimplicitautograd_dispatch.h> |
366 | #include <ATen/ops/cumulative_trapezoid_native.h> |
367 | #include <ATen/ops/data_compositeimplicitautograd_dispatch.h> |
368 | #include <ATen/ops/data_native.h> |
369 | #include <ATen/ops/det_compositeimplicitautograd_dispatch.h> |
370 | #include <ATen/ops/det_native.h> |
371 | #include <ATen/ops/diag_compositeimplicitautograd_dispatch.h> |
372 | #include <ATen/ops/diag_native.h> |
373 | #include <ATen/ops/diagflat_compositeimplicitautograd_dispatch.h> |
374 | #include <ATen/ops/diagflat_native.h> |
375 | #include <ATen/ops/diagonal_compositeimplicitautograd_dispatch.h> |
376 | #include <ATen/ops/diagonal_native.h> |
377 | #include <ATen/ops/diff_compositeimplicitautograd_dispatch.h> |
378 | #include <ATen/ops/diff_native.h> |
379 | #include <ATen/ops/divide_compositeimplicitautograd_dispatch.h> |
380 | #include <ATen/ops/divide_native.h> |
381 | #include <ATen/ops/dropout_compositeimplicitautograd_dispatch.h> |
382 | #include <ATen/ops/dropout_native.h> |
383 | #include <ATen/ops/dsplit_compositeimplicitautograd_dispatch.h> |
384 | #include <ATen/ops/dsplit_native.h> |
385 | #include <ATen/ops/dstack_compositeimplicitautograd_dispatch.h> |
386 | #include <ATen/ops/dstack_native.h> |
387 | #include <ATen/ops/einsum_compositeimplicitautograd_dispatch.h> |
388 | #include <ATen/ops/einsum_native.h> |
389 | #include <ATen/ops/embedding_backward_compositeimplicitautograd_dispatch.h> |
390 | #include <ATen/ops/embedding_backward_native.h> |
391 | #include <ATen/ops/embedding_bag_compositeimplicitautograd_dispatch.h> |
392 | #include <ATen/ops/embedding_bag_native.h> |
393 | #include <ATen/ops/embedding_sparse_backward_compositeimplicitautograd_dispatch.h> |
394 | #include <ATen/ops/embedding_sparse_backward_native.h> |
395 | #include <ATen/ops/empty_compositeimplicitautograd_dispatch.h> |
396 | #include <ATen/ops/empty_native.h> |
397 | #include <ATen/ops/expand_as_compositeimplicitautograd_dispatch.h> |
398 | #include <ATen/ops/expand_as_native.h> |
399 | #include <ATen/ops/fake_quantize_per_channel_affine_cachemask_backward_compositeimplicitautograd_dispatch.h> |
400 | #include <ATen/ops/fake_quantize_per_channel_affine_cachemask_backward_native.h> |
401 | #include <ATen/ops/fake_quantize_per_channel_affine_compositeimplicitautograd_dispatch.h> |
402 | #include <ATen/ops/fake_quantize_per_channel_affine_native.h> |
403 | #include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward_compositeimplicitautograd_dispatch.h> |
404 | #include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward_native.h> |
405 | #include <ATen/ops/fake_quantize_per_tensor_affine_compositeimplicitautograd_dispatch.h> |
406 | #include <ATen/ops/fake_quantize_per_tensor_affine_native.h> |
407 | #include <ATen/ops/fbgemm_linear_fp16_weight_compositeimplicitautograd_dispatch.h> |
408 | #include <ATen/ops/fbgemm_linear_fp16_weight_fp32_activation_compositeimplicitautograd_dispatch.h> |
409 | #include <ATen/ops/fbgemm_linear_fp16_weight_fp32_activation_native.h> |
410 | #include <ATen/ops/fbgemm_linear_fp16_weight_native.h> |
411 | #include <ATen/ops/fbgemm_linear_int8_weight_compositeimplicitautograd_dispatch.h> |
412 | #include <ATen/ops/fbgemm_linear_int8_weight_fp32_activation_compositeimplicitautograd_dispatch.h> |
413 | #include <ATen/ops/fbgemm_linear_int8_weight_fp32_activation_native.h> |
414 | #include <ATen/ops/fbgemm_linear_int8_weight_native.h> |
415 | #include <ATen/ops/fbgemm_linear_quantize_weight_compositeimplicitautograd_dispatch.h> |
416 | #include <ATen/ops/fbgemm_linear_quantize_weight_native.h> |
417 | #include <ATen/ops/fbgemm_pack_gemm_matrix_fp16_compositeimplicitautograd_dispatch.h> |
418 | #include <ATen/ops/fbgemm_pack_gemm_matrix_fp16_native.h> |
419 | #include <ATen/ops/fbgemm_pack_quantized_matrix_compositeimplicitautograd_dispatch.h> |
420 | #include <ATen/ops/fbgemm_pack_quantized_matrix_native.h> |
421 | #include <ATen/ops/feature_alpha_dropout_compositeimplicitautograd_dispatch.h> |
422 | #include <ATen/ops/feature_alpha_dropout_native.h> |
423 | #include <ATen/ops/feature_dropout_compositeimplicitautograd_dispatch.h> |
424 | #include <ATen/ops/feature_dropout_native.h> |
425 | #include <ATen/ops/fft_fft2_compositeimplicitautograd_dispatch.h> |
426 | #include <ATen/ops/fft_fft2_native.h> |
427 | #include <ATen/ops/fft_fft_compositeimplicitautograd_dispatch.h> |
428 | #include <ATen/ops/fft_fft_native.h> |
429 | #include <ATen/ops/fft_fftn_compositeimplicitautograd_dispatch.h> |
430 | #include <ATen/ops/fft_fftn_native.h> |
431 | #include <ATen/ops/fft_fftshift_compositeimplicitautograd_dispatch.h> |
432 | #include <ATen/ops/fft_fftshift_native.h> |
433 | #include <ATen/ops/fft_hfft2_compositeimplicitautograd_dispatch.h> |
434 | #include <ATen/ops/fft_hfft2_native.h> |
435 | #include <ATen/ops/fft_hfft_compositeimplicitautograd_dispatch.h> |
436 | #include <ATen/ops/fft_hfft_native.h> |
437 | #include <ATen/ops/fft_hfftn_compositeimplicitautograd_dispatch.h> |
438 | #include <ATen/ops/fft_hfftn_native.h> |
439 | #include <ATen/ops/fft_ifft2_compositeimplicitautograd_dispatch.h> |
440 | #include <ATen/ops/fft_ifft2_native.h> |
441 | #include <ATen/ops/fft_ifft_compositeimplicitautograd_dispatch.h> |
442 | #include <ATen/ops/fft_ifft_native.h> |
443 | #include <ATen/ops/fft_ifftn_compositeimplicitautograd_dispatch.h> |
444 | #include <ATen/ops/fft_ifftn_native.h> |
445 | #include <ATen/ops/fft_ifftshift_compositeimplicitautograd_dispatch.h> |
446 | #include <ATen/ops/fft_ifftshift_native.h> |
447 | #include <ATen/ops/fft_ihfft2_compositeimplicitautograd_dispatch.h> |
448 | #include <ATen/ops/fft_ihfft2_native.h> |
449 | #include <ATen/ops/fft_ihfft_compositeimplicitautograd_dispatch.h> |
450 | #include <ATen/ops/fft_ihfft_native.h> |
451 | #include <ATen/ops/fft_ihfftn_compositeimplicitautograd_dispatch.h> |
452 | #include <ATen/ops/fft_ihfftn_native.h> |
453 | #include <ATen/ops/fft_irfft2_compositeimplicitautograd_dispatch.h> |
454 | #include <ATen/ops/fft_irfft2_native.h> |
455 | #include <ATen/ops/fft_irfft_compositeimplicitautograd_dispatch.h> |
456 | #include <ATen/ops/fft_irfft_native.h> |
457 | #include <ATen/ops/fft_irfftn_compositeimplicitautograd_dispatch.h> |
458 | #include <ATen/ops/fft_irfftn_native.h> |
459 | #include <ATen/ops/fft_rfft2_compositeimplicitautograd_dispatch.h> |
460 | #include <ATen/ops/fft_rfft2_native.h> |
461 | #include <ATen/ops/fft_rfft_compositeimplicitautograd_dispatch.h> |
462 | #include <ATen/ops/fft_rfft_native.h> |
463 | #include <ATen/ops/fft_rfftn_compositeimplicitautograd_dispatch.h> |
464 | #include <ATen/ops/fft_rfftn_native.h> |
465 | #include <ATen/ops/fill_diagonal_compositeimplicitautograd_dispatch.h> |
466 | #include <ATen/ops/fill_diagonal_native.h> |
467 | #include <ATen/ops/fix_compositeimplicitautograd_dispatch.h> |
468 | #include <ATen/ops/fix_native.h> |
469 | #include <ATen/ops/flatten_compositeimplicitautograd_dispatch.h> |
470 | #include <ATen/ops/flatten_dense_tensors_compositeimplicitautograd_dispatch.h> |
471 | #include <ATen/ops/flatten_dense_tensors_native.h> |
472 | #include <ATen/ops/flatten_native.h> |
473 | #include <ATen/ops/fliplr_compositeimplicitautograd_dispatch.h> |
474 | #include <ATen/ops/fliplr_native.h> |
475 | #include <ATen/ops/flipud_compositeimplicitautograd_dispatch.h> |
476 | #include <ATen/ops/flipud_native.h> |
477 | #include <ATen/ops/float_power_compositeimplicitautograd_dispatch.h> |
478 | #include <ATen/ops/float_power_native.h> |
479 | #include <ATen/ops/floor_divide_compositeimplicitautograd_dispatch.h> |
480 | #include <ATen/ops/floor_divide_native.h> |
481 | #include <ATen/ops/frobenius_norm_compositeimplicitautograd_dispatch.h> |
482 | #include <ATen/ops/frobenius_norm_native.h> |
483 | #include <ATen/ops/fused_moving_avg_obs_fake_quant_compositeimplicitautograd_dispatch.h> |
484 | #include <ATen/ops/fused_moving_avg_obs_fake_quant_native.h> |
485 | #include <ATen/ops/gather_backward_compositeimplicitautograd_dispatch.h> |
486 | #include <ATen/ops/gather_backward_native.h> |
487 | #include <ATen/ops/gather_compositeimplicitautograd_dispatch.h> |
488 | #include <ATen/ops/gather_native.h> |
489 | #include <ATen/ops/ger_compositeimplicitautograd_dispatch.h> |
490 | #include <ATen/ops/ger_native.h> |
491 | #include <ATen/ops/gradient_compositeimplicitautograd_dispatch.h> |
492 | #include <ATen/ops/gradient_native.h> |
493 | #include <ATen/ops/greater_compositeimplicitautograd_dispatch.h> |
494 | #include <ATen/ops/greater_equal_compositeimplicitautograd_dispatch.h> |
495 | #include <ATen/ops/greater_equal_native.h> |
496 | #include <ATen/ops/greater_native.h> |
497 | #include <ATen/ops/grid_sampler_compositeimplicitautograd_dispatch.h> |
498 | #include <ATen/ops/grid_sampler_native.h> |
499 | #include <ATen/ops/group_norm_compositeimplicitautograd_dispatch.h> |
500 | #include <ATen/ops/group_norm_native.h> |
501 | #include <ATen/ops/gru_cell_compositeimplicitautograd_dispatch.h> |
502 | #include <ATen/ops/gru_cell_native.h> |
503 | #include <ATen/ops/gru_compositeimplicitautograd_dispatch.h> |
504 | #include <ATen/ops/gru_native.h> |
505 | #include <ATen/ops/hinge_embedding_loss_compositeimplicitautograd_dispatch.h> |
506 | #include <ATen/ops/hinge_embedding_loss_native.h> |
507 | #include <ATen/ops/histogramdd_compositeimplicitautograd_dispatch.h> |
508 | #include <ATen/ops/histogramdd_native.h> |
509 | #include <ATen/ops/hsplit_compositeimplicitautograd_dispatch.h> |
510 | #include <ATen/ops/hsplit_native.h> |
511 | #include <ATen/ops/hstack_compositeimplicitautograd_dispatch.h> |
512 | #include <ATen/ops/hstack_native.h> |
513 | #include <ATen/ops/imag_compositeimplicitautograd_dispatch.h> |
514 | #include <ATen/ops/imag_native.h> |
515 | #include <ATen/ops/index_add_compositeimplicitautograd_dispatch.h> |
516 | #include <ATen/ops/index_add_native.h> |
517 | #include <ATen/ops/index_copy_compositeimplicitautograd_dispatch.h> |
518 | #include <ATen/ops/index_copy_native.h> |
519 | #include <ATen/ops/index_fill_compositeimplicitautograd_dispatch.h> |
520 | #include <ATen/ops/index_fill_native.h> |
521 | #include <ATen/ops/index_select_backward_compositeimplicitautograd_dispatch.h> |
522 | #include <ATen/ops/index_select_backward_native.h> |
523 | #include <ATen/ops/index_select_compositeimplicitautograd_dispatch.h> |
524 | #include <ATen/ops/index_select_native.h> |
525 | #include <ATen/ops/infinitely_differentiable_gelu_backward_compositeimplicitautograd_dispatch.h> |
526 | #include <ATen/ops/infinitely_differentiable_gelu_backward_native.h> |
527 | #include <ATen/ops/inner_compositeimplicitautograd_dispatch.h> |
528 | #include <ATen/ops/inner_native.h> |
529 | #include <ATen/ops/instance_norm_compositeimplicitautograd_dispatch.h> |
530 | #include <ATen/ops/instance_norm_native.h> |
531 | #include <ATen/ops/inverse_compositeimplicitautograd_dispatch.h> |
532 | #include <ATen/ops/inverse_native.h> |
533 | #include <ATen/ops/is_complex_compositeimplicitautograd_dispatch.h> |
534 | #include <ATen/ops/is_complex_native.h> |
535 | #include <ATen/ops/is_conj_compositeimplicitautograd_dispatch.h> |
536 | #include <ATen/ops/is_conj_native.h> |
537 | #include <ATen/ops/is_distributed_compositeimplicitautograd_dispatch.h> |
538 | #include <ATen/ops/is_distributed_native.h> |
539 | #include <ATen/ops/is_floating_point_compositeimplicitautograd_dispatch.h> |
540 | #include <ATen/ops/is_floating_point_native.h> |
541 | #include <ATen/ops/is_inference_compositeimplicitautograd_dispatch.h> |
542 | #include <ATen/ops/is_inference_native.h> |
543 | #include <ATen/ops/is_leaf_compositeimplicitautograd_dispatch.h> |
544 | #include <ATen/ops/is_leaf_native.h> |
545 | #include <ATen/ops/is_neg_compositeimplicitautograd_dispatch.h> |
546 | #include <ATen/ops/is_neg_native.h> |
547 | #include <ATen/ops/is_nonzero_compositeimplicitautograd_dispatch.h> |
548 | #include <ATen/ops/is_nonzero_native.h> |
549 | #include <ATen/ops/is_signed_compositeimplicitautograd_dispatch.h> |
550 | #include <ATen/ops/is_signed_native.h> |
551 | #include <ATen/ops/is_vulkan_available_compositeimplicitautograd_dispatch.h> |
552 | #include <ATen/ops/is_vulkan_available_native.h> |
553 | #include <ATen/ops/isclose_compositeimplicitautograd_dispatch.h> |
554 | #include <ATen/ops/isclose_native.h> |
555 | #include <ATen/ops/isfinite_compositeimplicitautograd_dispatch.h> |
556 | #include <ATen/ops/isfinite_native.h> |
557 | #include <ATen/ops/isreal_compositeimplicitautograd_dispatch.h> |
558 | #include <ATen/ops/isreal_native.h> |
559 | #include <ATen/ops/istft_compositeimplicitautograd_dispatch.h> |
560 | #include <ATen/ops/istft_native.h> |
561 | #include <ATen/ops/item_compositeimplicitautograd_dispatch.h> |
562 | #include <ATen/ops/item_native.h> |
563 | #include <ATen/ops/kl_div_compositeimplicitautograd_dispatch.h> |
564 | #include <ATen/ops/kl_div_native.h> |
565 | #include <ATen/ops/kron_compositeimplicitautograd_dispatch.h> |
566 | #include <ATen/ops/kron_native.h> |
567 | #include <ATen/ops/kthvalue_compositeimplicitautograd_dispatch.h> |
568 | #include <ATen/ops/kthvalue_native.h> |
569 | #include <ATen/ops/l1_loss_compositeimplicitautograd_dispatch.h> |
570 | #include <ATen/ops/l1_loss_native.h> |
571 | #include <ATen/ops/layer_norm_compositeimplicitautograd_dispatch.h> |
572 | #include <ATen/ops/layer_norm_native.h> |
573 | #include <ATen/ops/ldexp_compositeimplicitautograd_dispatch.h> |
574 | #include <ATen/ops/ldexp_native.h> |
575 | #include <ATen/ops/less_compositeimplicitautograd_dispatch.h> |
576 | #include <ATen/ops/less_equal_compositeimplicitautograd_dispatch.h> |
577 | #include <ATen/ops/less_equal_native.h> |
578 | #include <ATen/ops/less_native.h> |
579 | #include <ATen/ops/linalg_cholesky_compositeimplicitautograd_dispatch.h> |
580 | #include <ATen/ops/linalg_cholesky_native.h> |
581 | #include <ATen/ops/linalg_cond_compositeimplicitautograd_dispatch.h> |
582 | #include <ATen/ops/linalg_cond_native.h> |
583 | #include <ATen/ops/linalg_det_compositeimplicitautograd_dispatch.h> |
584 | #include <ATen/ops/linalg_det_native.h> |
585 | #include <ATen/ops/linalg_diagonal_compositeimplicitautograd_dispatch.h> |
586 | #include <ATen/ops/linalg_diagonal_native.h> |
587 | #include <ATen/ops/linalg_eigh_compositeimplicitautograd_dispatch.h> |
588 | #include <ATen/ops/linalg_eigh_native.h> |
589 | #include <ATen/ops/linalg_eigvals_compositeimplicitautograd_dispatch.h> |
590 | #include <ATen/ops/linalg_eigvals_native.h> |
591 | #include <ATen/ops/linalg_eigvalsh_compositeimplicitautograd_dispatch.h> |
592 | #include <ATen/ops/linalg_eigvalsh_native.h> |
593 | #include <ATen/ops/linalg_inv_compositeimplicitautograd_dispatch.h> |
594 | #include <ATen/ops/linalg_inv_native.h> |
595 | #include <ATen/ops/linalg_ldl_factor_compositeimplicitautograd_dispatch.h> |
596 | #include <ATen/ops/linalg_ldl_factor_native.h> |
597 | #include <ATen/ops/linalg_lu_factor_compositeimplicitautograd_dispatch.h> |
598 | #include <ATen/ops/linalg_lu_factor_native.h> |
599 | #include <ATen/ops/linalg_matmul_compositeimplicitautograd_dispatch.h> |
600 | #include <ATen/ops/linalg_matmul_native.h> |
601 | #include <ATen/ops/linalg_matrix_norm_compositeimplicitautograd_dispatch.h> |
602 | #include <ATen/ops/linalg_matrix_norm_native.h> |
603 | #include <ATen/ops/linalg_matrix_power_compositeimplicitautograd_dispatch.h> |
604 | #include <ATen/ops/linalg_matrix_power_native.h> |
605 | #include <ATen/ops/linalg_matrix_rank_compositeimplicitautograd_dispatch.h> |
606 | #include <ATen/ops/linalg_matrix_rank_native.h> |
607 | #include <ATen/ops/linalg_multi_dot_compositeimplicitautograd_dispatch.h> |
608 | #include <ATen/ops/linalg_multi_dot_native.h> |
609 | #include <ATen/ops/linalg_norm_compositeimplicitautograd_dispatch.h> |
610 | #include <ATen/ops/linalg_norm_native.h> |
611 | #include <ATen/ops/linalg_pinv_compositeimplicitautograd_dispatch.h> |
612 | #include <ATen/ops/linalg_pinv_native.h> |
613 | #include <ATen/ops/linalg_slogdet_compositeimplicitautograd_dispatch.h> |
614 | #include <ATen/ops/linalg_slogdet_native.h> |
615 | #include <ATen/ops/linalg_solve_compositeimplicitautograd_dispatch.h> |
616 | #include <ATen/ops/linalg_solve_ex_compositeimplicitautograd_dispatch.h> |
617 | #include <ATen/ops/linalg_solve_ex_native.h> |
618 | #include <ATen/ops/linalg_solve_native.h> |
619 | #include <ATen/ops/linalg_svd_compositeimplicitautograd_dispatch.h> |
620 | #include <ATen/ops/linalg_svd_native.h> |
621 | #include <ATen/ops/linalg_svdvals_compositeimplicitautograd_dispatch.h> |
622 | #include <ATen/ops/linalg_svdvals_native.h> |
623 | #include <ATen/ops/linalg_tensorinv_compositeimplicitautograd_dispatch.h> |
624 | #include <ATen/ops/linalg_tensorinv_native.h> |
625 | #include <ATen/ops/linalg_tensorsolve_compositeimplicitautograd_dispatch.h> |
626 | #include <ATen/ops/linalg_tensorsolve_native.h> |
627 | #include <ATen/ops/linalg_vander_compositeimplicitautograd_dispatch.h> |
628 | #include <ATen/ops/linalg_vander_native.h> |
629 | #include <ATen/ops/linalg_vecdot_compositeimplicitautograd_dispatch.h> |
630 | #include <ATen/ops/linalg_vecdot_native.h> |
631 | #include <ATen/ops/linear_compositeimplicitautograd_dispatch.h> |
632 | #include <ATen/ops/linear_native.h> |
633 | #include <ATen/ops/log_sigmoid_compositeimplicitautograd_dispatch.h> |
634 | #include <ATen/ops/log_sigmoid_native.h> |
635 | #include <ATen/ops/log_softmax_compositeimplicitautograd_dispatch.h> |
636 | #include <ATen/ops/log_softmax_native.h> |
637 | #include <ATen/ops/logcumsumexp_compositeimplicitautograd_dispatch.h> |
638 | #include <ATen/ops/logcumsumexp_native.h> |
639 | #include <ATen/ops/logdet_compositeimplicitautograd_dispatch.h> |
640 | #include <ATen/ops/logdet_native.h> |
641 | #include <ATen/ops/logsumexp_compositeimplicitautograd_dispatch.h> |
642 | #include <ATen/ops/logsumexp_native.h> |
643 | #include <ATen/ops/lstm_cell_compositeimplicitautograd_dispatch.h> |
644 | #include <ATen/ops/lstm_cell_native.h> |
645 | #include <ATen/ops/lstm_compositeimplicitautograd_dispatch.h> |
646 | #include <ATen/ops/lstm_native.h> |
647 | #include <ATen/ops/lu_solve_compositeimplicitautograd_dispatch.h> |
648 | #include <ATen/ops/lu_solve_native.h> |
649 | #include <ATen/ops/mH_compositeimplicitautograd_dispatch.h> |
650 | #include <ATen/ops/mH_native.h> |
651 | #include <ATen/ops/mT_compositeimplicitautograd_dispatch.h> |
652 | #include <ATen/ops/mT_native.h> |
653 | #include <ATen/ops/margin_ranking_loss_compositeimplicitautograd_dispatch.h> |
654 | #include <ATen/ops/margin_ranking_loss_native.h> |
655 | #include <ATen/ops/masked_select_backward_compositeimplicitautograd_dispatch.h> |
656 | #include <ATen/ops/masked_select_backward_native.h> |
657 | #include <ATen/ops/matmul_compositeimplicitautograd_dispatch.h> |
658 | #include <ATen/ops/matmul_native.h> |
659 | #include <ATen/ops/matrix_H_compositeimplicitautograd_dispatch.h> |
660 | #include <ATen/ops/matrix_H_native.h> |
661 | #include <ATen/ops/matrix_exp_backward_compositeimplicitautograd_dispatch.h> |
662 | #include <ATen/ops/matrix_exp_backward_native.h> |
663 | #include <ATen/ops/matrix_exp_compositeimplicitautograd_dispatch.h> |
664 | #include <ATen/ops/matrix_exp_native.h> |
665 | #include <ATen/ops/matrix_power_compositeimplicitautograd_dispatch.h> |
666 | #include <ATen/ops/matrix_power_native.h> |
667 | #include <ATen/ops/max_compositeimplicitautograd_dispatch.h> |
668 | #include <ATen/ops/max_native.h> |
669 | #include <ATen/ops/max_pool1d_compositeimplicitautograd_dispatch.h> |
670 | #include <ATen/ops/max_pool1d_native.h> |
671 | #include <ATen/ops/max_pool1d_with_indices_compositeimplicitautograd_dispatch.h> |
672 | #include <ATen/ops/max_pool1d_with_indices_native.h> |
673 | #include <ATen/ops/max_pool2d_compositeimplicitautograd_dispatch.h> |
674 | #include <ATen/ops/max_pool2d_native.h> |
675 | #include <ATen/ops/max_pool3d_compositeimplicitautograd_dispatch.h> |
676 | #include <ATen/ops/max_pool3d_native.h> |
677 | #include <ATen/ops/mean_compositeimplicitautograd_dispatch.h> |
678 | #include <ATen/ops/mean_native.h> |
679 | #include <ATen/ops/median_compositeimplicitautograd_dispatch.h> |
680 | #include <ATen/ops/median_native.h> |
681 | #include <ATen/ops/meshgrid_compositeimplicitautograd_dispatch.h> |
682 | #include <ATen/ops/meshgrid_native.h> |
683 | #include <ATen/ops/min_compositeimplicitautograd_dispatch.h> |
684 | #include <ATen/ops/min_native.h> |
685 | #include <ATen/ops/mish_backward_compositeimplicitautograd_dispatch.h> |
686 | #include <ATen/ops/mish_backward_native.h> |
687 | #include <ATen/ops/mode_compositeimplicitautograd_dispatch.h> |
688 | #include <ATen/ops/mode_native.h> |
689 | #include <ATen/ops/moveaxis_compositeimplicitautograd_dispatch.h> |
690 | #include <ATen/ops/moveaxis_native.h> |
691 | #include <ATen/ops/movedim_compositeimplicitautograd_dispatch.h> |
692 | #include <ATen/ops/movedim_native.h> |
693 | #include <ATen/ops/msort_compositeimplicitautograd_dispatch.h> |
694 | #include <ATen/ops/msort_native.h> |
695 | #include <ATen/ops/multilabel_margin_loss_compositeimplicitautograd_dispatch.h> |
696 | #include <ATen/ops/multilabel_margin_loss_native.h> |
697 | #include <ATen/ops/multiply_compositeimplicitautograd_dispatch.h> |
698 | #include <ATen/ops/multiply_native.h> |
699 | #include <ATen/ops/nanmean_compositeimplicitautograd_dispatch.h> |
700 | #include <ATen/ops/nanmean_native.h> |
701 | #include <ATen/ops/nanmedian_compositeimplicitautograd_dispatch.h> |
702 | #include <ATen/ops/nanmedian_native.h> |
703 | #include <ATen/ops/nanquantile_compositeimplicitautograd_dispatch.h> |
704 | #include <ATen/ops/nanquantile_native.h> |
705 | #include <ATen/ops/narrow_compositeimplicitautograd_dispatch.h> |
706 | #include <ATen/ops/narrow_native.h> |
707 | #include <ATen/ops/native_channel_shuffle_compositeimplicitautograd_dispatch.h> |
708 | #include <ATen/ops/native_channel_shuffle_native.h> |
709 | #include <ATen/ops/negative_compositeimplicitautograd_dispatch.h> |
710 | #include <ATen/ops/negative_native.h> |
711 | #include <ATen/ops/nested_to_padded_tensor_compositeimplicitautograd_dispatch.h> |
712 | #include <ATen/ops/nested_to_padded_tensor_native.h> |
713 | #include <ATen/ops/nll_loss2d_compositeimplicitautograd_dispatch.h> |
714 | #include <ATen/ops/nll_loss2d_native.h> |
715 | #include <ATen/ops/nll_loss_compositeimplicitautograd_dispatch.h> |
716 | #include <ATen/ops/nll_loss_native.h> |
717 | #include <ATen/ops/nll_loss_nd_compositeimplicitautograd_dispatch.h> |
718 | #include <ATen/ops/nll_loss_nd_native.h> |
719 | #include <ATen/ops/nonzero_numpy_compositeimplicitautograd_dispatch.h> |
720 | #include <ATen/ops/nonzero_numpy_native.h> |
721 | #include <ATen/ops/norm_compositeimplicitautograd_dispatch.h> |
722 | #include <ATen/ops/norm_except_dim_compositeimplicitautograd_dispatch.h> |
723 | #include <ATen/ops/norm_except_dim_native.h> |
724 | #include <ATen/ops/norm_native.h> |
725 | #include <ATen/ops/not_equal_compositeimplicitautograd_dispatch.h> |
726 | #include <ATen/ops/not_equal_native.h> |
727 | #include <ATen/ops/nuclear_norm_compositeimplicitautograd_dispatch.h> |
728 | #include <ATen/ops/nuclear_norm_native.h> |
729 | #include <ATen/ops/numpy_T_compositeimplicitautograd_dispatch.h> |
730 | #include <ATen/ops/numpy_T_native.h> |
731 | #include <ATen/ops/one_hot_compositeimplicitautograd_dispatch.h> |
732 | #include <ATen/ops/one_hot_native.h> |
733 | #include <ATen/ops/or_compositeimplicitautograd_dispatch.h> |
734 | #include <ATen/ops/or_native.h> |
735 | #include <ATen/ops/orgqr_compositeimplicitautograd_dispatch.h> |
736 | #include <ATen/ops/orgqr_native.h> |
737 | #include <ATen/ops/outer_compositeimplicitautograd_dispatch.h> |
738 | #include <ATen/ops/outer_native.h> |
739 | #include <ATen/ops/output_nr_compositeimplicitautograd_dispatch.h> |
740 | #include <ATen/ops/output_nr_native.h> |
741 | #include <ATen/ops/pad_compositeimplicitautograd_dispatch.h> |
742 | #include <ATen/ops/pad_native.h> |
743 | #include <ATen/ops/pad_sequence_compositeimplicitautograd_dispatch.h> |
744 | #include <ATen/ops/pad_sequence_native.h> |
745 | #include <ATen/ops/pairwise_distance_compositeimplicitautograd_dispatch.h> |
746 | #include <ATen/ops/pairwise_distance_native.h> |
747 | #include <ATen/ops/pdist_compositeimplicitautograd_dispatch.h> |
748 | #include <ATen/ops/pdist_native.h> |
749 | #include <ATen/ops/pin_memory_compositeimplicitautograd_dispatch.h> |
750 | #include <ATen/ops/pin_memory_native.h> |
751 | #include <ATen/ops/pinverse_compositeimplicitautograd_dispatch.h> |
752 | #include <ATen/ops/pinverse_native.h> |
753 | #include <ATen/ops/poisson_nll_loss_compositeimplicitautograd_dispatch.h> |
754 | #include <ATen/ops/poisson_nll_loss_native.h> |
755 | #include <ATen/ops/positive_compositeimplicitautograd_dispatch.h> |
756 | #include <ATen/ops/positive_native.h> |
757 | #include <ATen/ops/prelu_compositeimplicitautograd_dispatch.h> |
758 | #include <ATen/ops/prelu_native.h> |
759 | #include <ATen/ops/prod_compositeimplicitautograd_dispatch.h> |
760 | #include <ATen/ops/prod_native.h> |
761 | #include <ATen/ops/promote_types_compositeimplicitautograd_dispatch.h> |
762 | #include <ATen/ops/promote_types_native.h> |
763 | #include <ATen/ops/qr_compositeimplicitautograd_dispatch.h> |
764 | #include <ATen/ops/qr_native.h> |
765 | #include <ATen/ops/quantile_compositeimplicitautograd_dispatch.h> |
766 | #include <ATen/ops/quantile_native.h> |
767 | #include <ATen/ops/quantized_gru_cell_compositeimplicitautograd_dispatch.h> |
768 | #include <ATen/ops/quantized_gru_cell_native.h> |
769 | #include <ATen/ops/quantized_lstm_cell_compositeimplicitautograd_dispatch.h> |
770 | #include <ATen/ops/quantized_lstm_cell_native.h> |
771 | #include <ATen/ops/quantized_rnn_relu_cell_compositeimplicitautograd_dispatch.h> |
772 | #include <ATen/ops/quantized_rnn_relu_cell_native.h> |
773 | #include <ATen/ops/quantized_rnn_tanh_cell_compositeimplicitautograd_dispatch.h> |
774 | #include <ATen/ops/quantized_rnn_tanh_cell_native.h> |
775 | #include <ATen/ops/rand_compositeimplicitautograd_dispatch.h> |
776 | #include <ATen/ops/rand_native.h> |
777 | #include <ATen/ops/randn_compositeimplicitautograd_dispatch.h> |
778 | #include <ATen/ops/randn_native.h> |
779 | #include <ATen/ops/ravel_compositeimplicitautograd_dispatch.h> |
780 | #include <ATen/ops/ravel_native.h> |
781 | #include <ATen/ops/real_compositeimplicitautograd_dispatch.h> |
782 | #include <ATen/ops/real_native.h> |
783 | #include <ATen/ops/refine_names_compositeimplicitautograd_dispatch.h> |
784 | #include <ATen/ops/refine_names_native.h> |
785 | #include <ATen/ops/relu6_compositeimplicitautograd_dispatch.h> |
786 | #include <ATen/ops/relu6_native.h> |
787 | #include <ATen/ops/rename_compositeimplicitautograd_dispatch.h> |
788 | #include <ATen/ops/rename_native.h> |
789 | #include <ATen/ops/repeat_interleave_compositeimplicitautograd_dispatch.h> |
790 | #include <ATen/ops/repeat_interleave_native.h> |
791 | #include <ATen/ops/requires_grad_compositeimplicitautograd_dispatch.h> |
792 | #include <ATen/ops/requires_grad_native.h> |
793 | #include <ATen/ops/reshape_as_compositeimplicitautograd_dispatch.h> |
794 | #include <ATen/ops/reshape_as_native.h> |
795 | #include <ATen/ops/reshape_compositeimplicitautograd_dispatch.h> |
796 | #include <ATen/ops/reshape_native.h> |
797 | #include <ATen/ops/resolve_conj_compositeimplicitautograd_dispatch.h> |
798 | #include <ATen/ops/resolve_conj_native.h> |
799 | #include <ATen/ops/resolve_neg_compositeimplicitautograd_dispatch.h> |
800 | #include <ATen/ops/resolve_neg_native.h> |
801 | #include <ATen/ops/result_type_compositeimplicitautograd_dispatch.h> |
802 | #include <ATen/ops/result_type_native.h> |
803 | #include <ATen/ops/retain_grad_compositeimplicitautograd_dispatch.h> |
804 | #include <ATen/ops/retain_grad_native.h> |
805 | #include <ATen/ops/retains_grad_compositeimplicitautograd_dispatch.h> |
806 | #include <ATen/ops/retains_grad_native.h> |
807 | #include <ATen/ops/rnn_relu_cell_compositeimplicitautograd_dispatch.h> |
808 | #include <ATen/ops/rnn_relu_cell_native.h> |
809 | #include <ATen/ops/rnn_relu_compositeimplicitautograd_dispatch.h> |
810 | #include <ATen/ops/rnn_relu_native.h> |
811 | #include <ATen/ops/rnn_tanh_cell_compositeimplicitautograd_dispatch.h> |
812 | #include <ATen/ops/rnn_tanh_cell_native.h> |
813 | #include <ATen/ops/rnn_tanh_compositeimplicitautograd_dispatch.h> |
814 | #include <ATen/ops/rnn_tanh_native.h> |
815 | #include <ATen/ops/row_stack_compositeimplicitautograd_dispatch.h> |
816 | #include <ATen/ops/row_stack_native.h> |
817 | #include <ATen/ops/rrelu_compositeimplicitautograd_dispatch.h> |
818 | #include <ATen/ops/rrelu_native.h> |
819 | #include <ATen/ops/scaled_dot_product_attention_compositeimplicitautograd_dispatch.h> |
820 | #include <ATen/ops/scaled_dot_product_attention_native.h> |
821 | #include <ATen/ops/scatter_add_compositeimplicitautograd_dispatch.h> |
822 | #include <ATen/ops/scatter_add_native.h> |
823 | #include <ATen/ops/scatter_compositeimplicitautograd_dispatch.h> |
824 | #include <ATen/ops/scatter_native.h> |
825 | #include <ATen/ops/select_compositeimplicitautograd_dispatch.h> |
826 | #include <ATen/ops/select_native.h> |
827 | #include <ATen/ops/selu_compositeimplicitautograd_dispatch.h> |
828 | #include <ATen/ops/selu_native.h> |
829 | #include <ATen/ops/set_compositeimplicitautograd_dispatch.h> |
830 | #include <ATen/ops/set_data_compositeimplicitautograd_dispatch.h> |
831 | #include <ATen/ops/set_data_native.h> |
832 | #include <ATen/ops/set_native.h> |
833 | #include <ATen/ops/silu_backward_compositeimplicitautograd_dispatch.h> |
834 | #include <ATen/ops/silu_backward_native.h> |
835 | #include <ATen/ops/size_compositeimplicitautograd_dispatch.h> |
836 | #include <ATen/ops/size_native.h> |
837 | #include <ATen/ops/slogdet_compositeimplicitautograd_dispatch.h> |
838 | #include <ATen/ops/slogdet_native.h> |
839 | #include <ATen/ops/slow_conv3d_compositeimplicitautograd_dispatch.h> |
840 | #include <ATen/ops/slow_conv3d_native.h> |
841 | #include <ATen/ops/smm_compositeimplicitautograd_dispatch.h> |
842 | #include <ATen/ops/smm_native.h> |
843 | #include <ATen/ops/softmax_compositeimplicitautograd_dispatch.h> |
844 | #include <ATen/ops/softmax_native.h> |
845 | #include <ATen/ops/sort_compositeimplicitautograd_dispatch.h> |
846 | #include <ATen/ops/sort_native.h> |
847 | #include <ATen/ops/sparse_bsc_tensor_compositeimplicitautograd_dispatch.h> |
848 | #include <ATen/ops/sparse_bsc_tensor_native.h> |
849 | #include <ATen/ops/sparse_bsr_tensor_compositeimplicitautograd_dispatch.h> |
850 | #include <ATen/ops/sparse_bsr_tensor_native.h> |
851 | #include <ATen/ops/sparse_compressed_tensor_compositeimplicitautograd_dispatch.h> |
852 | #include <ATen/ops/sparse_compressed_tensor_native.h> |
853 | #include <ATen/ops/sparse_coo_tensor_compositeimplicitautograd_dispatch.h> |
854 | #include <ATen/ops/sparse_coo_tensor_native.h> |
855 | #include <ATen/ops/sparse_csc_tensor_compositeimplicitautograd_dispatch.h> |
856 | #include <ATen/ops/sparse_csc_tensor_native.h> |
857 | #include <ATen/ops/sparse_csr_tensor_compositeimplicitautograd_dispatch.h> |
858 | #include <ATen/ops/sparse_csr_tensor_native.h> |
859 | #include <ATen/ops/special_chebyshev_polynomial_t_compositeimplicitautograd_dispatch.h> |
860 | #include <ATen/ops/special_chebyshev_polynomial_t_native.h> |
861 | #include <ATen/ops/special_chebyshev_polynomial_u_compositeimplicitautograd_dispatch.h> |
862 | #include <ATen/ops/special_chebyshev_polynomial_u_native.h> |
863 | #include <ATen/ops/special_chebyshev_polynomial_v_compositeimplicitautograd_dispatch.h> |
864 | #include <ATen/ops/special_chebyshev_polynomial_v_native.h> |
865 | #include <ATen/ops/special_chebyshev_polynomial_w_compositeimplicitautograd_dispatch.h> |
866 | #include <ATen/ops/special_chebyshev_polynomial_w_native.h> |
867 | #include <ATen/ops/special_digamma_compositeimplicitautograd_dispatch.h> |
868 | #include <ATen/ops/special_digamma_native.h> |
869 | #include <ATen/ops/special_erf_compositeimplicitautograd_dispatch.h> |
870 | #include <ATen/ops/special_erf_native.h> |
871 | #include <ATen/ops/special_erfc_compositeimplicitautograd_dispatch.h> |
872 | #include <ATen/ops/special_erfc_native.h> |
873 | #include <ATen/ops/special_erfinv_compositeimplicitautograd_dispatch.h> |
874 | #include <ATen/ops/special_erfinv_native.h> |
875 | #include <ATen/ops/special_exp2_compositeimplicitautograd_dispatch.h> |
876 | #include <ATen/ops/special_exp2_native.h> |
877 | #include <ATen/ops/special_expit_compositeimplicitautograd_dispatch.h> |
878 | #include <ATen/ops/special_expit_native.h> |
879 | #include <ATen/ops/special_expm1_compositeimplicitautograd_dispatch.h> |
880 | #include <ATen/ops/special_expm1_native.h> |
881 | #include <ATen/ops/special_gammainc_compositeimplicitautograd_dispatch.h> |
882 | #include <ATen/ops/special_gammainc_native.h> |
883 | #include <ATen/ops/special_gammaincc_compositeimplicitautograd_dispatch.h> |
884 | #include <ATen/ops/special_gammaincc_native.h> |
885 | #include <ATen/ops/special_gammaln_compositeimplicitautograd_dispatch.h> |
886 | #include <ATen/ops/special_gammaln_native.h> |
887 | #include <ATen/ops/special_hermite_polynomial_h_compositeimplicitautograd_dispatch.h> |
888 | #include <ATen/ops/special_hermite_polynomial_h_native.h> |
889 | #include <ATen/ops/special_hermite_polynomial_he_compositeimplicitautograd_dispatch.h> |
890 | #include <ATen/ops/special_hermite_polynomial_he_native.h> |
891 | #include <ATen/ops/special_i0_compositeimplicitautograd_dispatch.h> |
892 | #include <ATen/ops/special_i0_native.h> |
893 | #include <ATen/ops/special_laguerre_polynomial_l_compositeimplicitautograd_dispatch.h> |
894 | #include <ATen/ops/special_laguerre_polynomial_l_native.h> |
895 | #include <ATen/ops/special_legendre_polynomial_p_compositeimplicitautograd_dispatch.h> |
896 | #include <ATen/ops/special_legendre_polynomial_p_native.h> |
897 | #include <ATen/ops/special_log1p_compositeimplicitautograd_dispatch.h> |
898 | #include <ATen/ops/special_log1p_native.h> |
899 | #include <ATen/ops/special_log_softmax_compositeimplicitautograd_dispatch.h> |
900 | #include <ATen/ops/special_log_softmax_native.h> |
901 | #include <ATen/ops/special_logit_compositeimplicitautograd_dispatch.h> |
902 | #include <ATen/ops/special_logit_native.h> |
903 | #include <ATen/ops/special_logsumexp_compositeimplicitautograd_dispatch.h> |
904 | #include <ATen/ops/special_logsumexp_native.h> |
905 | #include <ATen/ops/special_multigammaln_compositeimplicitautograd_dispatch.h> |
906 | #include <ATen/ops/special_multigammaln_native.h> |
907 | #include <ATen/ops/special_ndtr_compositeimplicitautograd_dispatch.h> |
908 | #include <ATen/ops/special_ndtr_native.h> |
909 | #include <ATen/ops/special_polygamma_compositeimplicitautograd_dispatch.h> |
910 | #include <ATen/ops/special_polygamma_native.h> |
911 | #include <ATen/ops/special_psi_compositeimplicitautograd_dispatch.h> |
912 | #include <ATen/ops/special_psi_native.h> |
913 | #include <ATen/ops/special_round_compositeimplicitautograd_dispatch.h> |
914 | #include <ATen/ops/special_round_native.h> |
915 | #include <ATen/ops/special_shifted_chebyshev_polynomial_t_compositeimplicitautograd_dispatch.h> |
916 | #include <ATen/ops/special_shifted_chebyshev_polynomial_t_native.h> |
917 | #include <ATen/ops/special_shifted_chebyshev_polynomial_u_compositeimplicitautograd_dispatch.h> |
918 | #include <ATen/ops/special_shifted_chebyshev_polynomial_u_native.h> |
919 | #include <ATen/ops/special_shifted_chebyshev_polynomial_v_compositeimplicitautograd_dispatch.h> |
920 | #include <ATen/ops/special_shifted_chebyshev_polynomial_v_native.h> |
921 | #include <ATen/ops/special_shifted_chebyshev_polynomial_w_compositeimplicitautograd_dispatch.h> |
922 | #include <ATen/ops/special_shifted_chebyshev_polynomial_w_native.h> |
923 | #include <ATen/ops/special_sinc_compositeimplicitautograd_dispatch.h> |
924 | #include <ATen/ops/special_sinc_native.h> |
925 | #include <ATen/ops/special_softmax_compositeimplicitautograd_dispatch.h> |
926 | #include <ATen/ops/special_softmax_native.h> |
927 | #include <ATen/ops/special_xlogy_compositeimplicitautograd_dispatch.h> |
928 | #include <ATen/ops/special_xlogy_native.h> |
929 | #include <ATen/ops/split_compositeimplicitautograd_dispatch.h> |
930 | #include <ATen/ops/split_native.h> |
931 | #include <ATen/ops/square_compositeimplicitautograd_dispatch.h> |
932 | #include <ATen/ops/square_native.h> |
933 | #include <ATen/ops/squeeze_compositeimplicitautograd_dispatch.h> |
934 | #include <ATen/ops/squeeze_native.h> |
935 | #include <ATen/ops/sspaddmm_compositeimplicitautograd_dispatch.h> |
936 | #include <ATen/ops/sspaddmm_native.h> |
937 | #include <ATen/ops/std_compositeimplicitautograd_dispatch.h> |
938 | #include <ATen/ops/std_mean_compositeimplicitautograd_dispatch.h> |
939 | #include <ATen/ops/std_mean_native.h> |
940 | #include <ATen/ops/std_native.h> |
941 | #include <ATen/ops/stft_compositeimplicitautograd_dispatch.h> |
942 | #include <ATen/ops/stft_native.h> |
943 | #include <ATen/ops/stride_compositeimplicitautograd_dispatch.h> |
944 | #include <ATen/ops/stride_native.h> |
945 | #include <ATen/ops/subtract_compositeimplicitautograd_dispatch.h> |
946 | #include <ATen/ops/subtract_native.h> |
947 | #include <ATen/ops/sum_compositeimplicitautograd_dispatch.h> |
948 | #include <ATen/ops/sum_native.h> |
949 | #include <ATen/ops/sum_to_size_compositeimplicitautograd_dispatch.h> |
950 | #include <ATen/ops/sum_to_size_native.h> |
951 | #include <ATen/ops/svd_compositeimplicitautograd_dispatch.h> |
952 | #include <ATen/ops/svd_native.h> |
953 | #include <ATen/ops/swapaxes_compositeimplicitautograd_dispatch.h> |
954 | #include <ATen/ops/swapaxes_native.h> |
955 | #include <ATen/ops/swapdims_compositeimplicitautograd_dispatch.h> |
956 | #include <ATen/ops/swapdims_native.h> |
957 | #include <ATen/ops/take_along_dim_compositeimplicitautograd_dispatch.h> |
958 | #include <ATen/ops/take_along_dim_native.h> |
959 | #include <ATen/ops/tensor_split_compositeimplicitautograd_dispatch.h> |
960 | #include <ATen/ops/tensor_split_native.h> |
961 | #include <ATen/ops/tensordot_compositeimplicitautograd_dispatch.h> |
962 | #include <ATen/ops/tensordot_native.h> |
963 | #include <ATen/ops/thnn_conv2d_compositeimplicitautograd_dispatch.h> |
964 | #include <ATen/ops/thnn_conv2d_native.h> |
965 | #include <ATen/ops/tile_compositeimplicitautograd_dispatch.h> |
966 | #include <ATen/ops/tile_native.h> |
967 | #include <ATen/ops/to_compositeimplicitautograd_dispatch.h> |
968 | #include <ATen/ops/to_dense_backward_compositeimplicitautograd_dispatch.h> |
969 | #include <ATen/ops/to_dense_backward_native.h> |
970 | #include <ATen/ops/to_dense_compositeimplicitautograd_dispatch.h> |
971 | #include <ATen/ops/to_dense_native.h> |
972 | #include <ATen/ops/to_mkldnn_backward_compositeimplicitautograd_dispatch.h> |
973 | #include <ATen/ops/to_mkldnn_backward_native.h> |
974 | #include <ATen/ops/to_native.h> |
975 | #include <ATen/ops/trace_backward_compositeimplicitautograd_dispatch.h> |
976 | #include <ATen/ops/trace_backward_native.h> |
977 | #include <ATen/ops/transpose_compositeimplicitautograd_dispatch.h> |
978 | #include <ATen/ops/transpose_native.h> |
979 | #include <ATen/ops/trapezoid_compositeimplicitautograd_dispatch.h> |
980 | #include <ATen/ops/trapezoid_native.h> |
981 | #include <ATen/ops/trapz_compositeimplicitautograd_dispatch.h> |
982 | #include <ATen/ops/trapz_native.h> |
983 | #include <ATen/ops/triplet_margin_loss_compositeimplicitautograd_dispatch.h> |
984 | #include <ATen/ops/triplet_margin_loss_native.h> |
985 | #include <ATen/ops/true_divide_compositeimplicitautograd_dispatch.h> |
986 | #include <ATen/ops/true_divide_native.h> |
987 | #include <ATen/ops/type_as_compositeimplicitautograd_dispatch.h> |
988 | #include <ATen/ops/type_as_native.h> |
989 | #include <ATen/ops/unbind_compositeimplicitautograd_dispatch.h> |
990 | #include <ATen/ops/unbind_native.h> |
991 | #include <ATen/ops/unflatten_compositeimplicitautograd_dispatch.h> |
992 | #include <ATen/ops/unflatten_dense_tensors_compositeimplicitautograd_dispatch.h> |
993 | #include <ATen/ops/unflatten_dense_tensors_native.h> |
994 | #include <ATen/ops/unflatten_native.h> |
995 | #include <ATen/ops/unsafe_chunk_compositeimplicitautograd_dispatch.h> |
996 | #include <ATen/ops/unsafe_chunk_native.h> |
997 | #include <ATen/ops/upsample_bicubic2d_compositeimplicitautograd_dispatch.h> |
998 | #include <ATen/ops/upsample_bicubic2d_native.h> |
999 | #include <ATen/ops/upsample_bilinear2d_compositeimplicitautograd_dispatch.h> |
1000 | #include <ATen/ops/upsample_bilinear2d_native.h> |
1001 | #include <ATen/ops/upsample_linear1d_compositeimplicitautograd_dispatch.h> |
1002 | #include <ATen/ops/upsample_linear1d_native.h> |
1003 | #include <ATen/ops/upsample_nearest1d_compositeimplicitautograd_dispatch.h> |
1004 | #include <ATen/ops/upsample_nearest1d_native.h> |
1005 | #include <ATen/ops/upsample_nearest2d_compositeimplicitautograd_dispatch.h> |
1006 | #include <ATen/ops/upsample_nearest2d_native.h> |
1007 | #include <ATen/ops/upsample_nearest3d_compositeimplicitautograd_dispatch.h> |
1008 | #include <ATen/ops/upsample_nearest3d_native.h> |
1009 | #include <ATen/ops/upsample_trilinear3d_compositeimplicitautograd_dispatch.h> |
1010 | #include <ATen/ops/upsample_trilinear3d_native.h> |
1011 | #include <ATen/ops/value_selecting_reduction_backward_compositeimplicitautograd_dispatch.h> |
1012 | #include <ATen/ops/value_selecting_reduction_backward_native.h> |
1013 | #include <ATen/ops/vander_compositeimplicitautograd_dispatch.h> |
1014 | #include <ATen/ops/vander_native.h> |
1015 | #include <ATen/ops/var_compositeimplicitautograd_dispatch.h> |
1016 | #include <ATen/ops/var_mean_compositeimplicitautograd_dispatch.h> |
1017 | #include <ATen/ops/var_mean_native.h> |
1018 | #include <ATen/ops/var_native.h> |
1019 | #include <ATen/ops/view_as_compositeimplicitautograd_dispatch.h> |
1020 | #include <ATen/ops/view_as_native.h> |
1021 | #include <ATen/ops/vsplit_compositeimplicitautograd_dispatch.h> |
1022 | #include <ATen/ops/vsplit_native.h> |
1023 | #include <ATen/ops/vstack_compositeimplicitautograd_dispatch.h> |
1024 | #include <ATen/ops/vstack_native.h> |
1025 | #include <ATen/ops/where_compositeimplicitautograd_dispatch.h> |
1026 | #include <ATen/ops/where_native.h> |
1027 | #include <ATen/ops/xor_compositeimplicitautograd_dispatch.h> |
1028 | #include <ATen/ops/xor_native.h> |
1029 | |
1030 | // See template file RegisterDispatchDefinitions.ini |
1031 | namespace at { |
1032 | // NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid |
1033 | // ambiguity with conflicting identifiers that may have been defined in |
1034 | // at namespace already. |
1035 | namespace { |
// Resizes an `out=` argument to the sizes computed by a meta function, after
// validating that the caller-supplied tensor is compatible.
//
// Checks (hard errors via TORCH_CHECK):
//   - `out` must already have the dtype the kernel will produce.
//   - `out` must already be on the device the kernel will write to.
// The actual size change is delegated to at::native::resize_output, which
// returns whether a resize actually happened.
void resize_out(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {
  TORCH_CHECK(options.dtype() == out.dtype(),
      "Expected out tensor to have dtype ", options.dtype(), ", but got ", out.dtype(), " instead");
  TORCH_CHECK(options.device() == out.device(),
      "Expected out tensor to have device ", options.device(), ", but got ", out.device(), " instead");
  const bool resized = at::native::resize_output(out, sizes);
  // Only restride if a resize occurred; otherwise we ignore the (advisory)
  // strides from the meta function and directly use the output tensor's
  // preexisting strides
  if (resized) {
    if (!strides.empty()) {
      // A meta function supplies either explicit strides or a memory format,
      // never both — assert that invariant before restriding.
      TORCH_INTERNAL_ASSERT(!options.memory_format_opt().has_value());
      // TODO: avoid the redispatch here
      out.as_strided_(sizes, strides);
    } else if (options.memory_format_opt().has_value()) {
      // No explicit strides: lay the freshly-resized tensor out according to
      // the requested memory format (e.g. channels_last).
      out.unsafeGetTensorImpl()->empty_tensor_restride(*options.memory_format_opt());
    }
  }
}
// Validates that `self` can serve as the destination of an in-place op whose
// result would have shape `sizes` and dtype/device `options`. Unlike
// resize_out, this never resizes: an in-place op must match exactly, so any
// mismatch is a hard error.
void check_inplace(const Tensor &self, IntArrayRef sizes, const TensorOptions &options) {
  // These checks are needed on those operators that:
  // 1) don't use 'TensorIterator' (e.g. 'addmm' and 'baddbmm')
  // 2) have particular typing rules (e.g. 'cumsum' and 'cumprod')
  // For other operators (e.g. 'add'), 'TensorIterator' already checks
  // these things separately.
  TORCH_CHECK(options.dtype() == self.dtype(),
      "Bad in-place call: ",
      "input tensor dtype ", self.dtype(), " and output tensor dtype ", options.dtype(), " should match");
  TORCH_CHECK(options.device() == self.device(),
      "Bad in-place call: ",
      "input tensor device ", self.device(), " and output tensor device ", options.device(), " should match");
  TORCH_CHECK(sizes == self.sizes(),
      "Bad in-place call: ",
      "input tensor size ", self.sizes(), " and output tensor size ", sizes, " should match");
}
// Generated wrappers for the deprecated _cast_<Type> operators. Each one
// forwards straight to the at::native implementation; `non_blocking` is
// passed through unchanged. The recurring "No device check" / "DeviceGuard
// omitted" markers are emitted by the code generator for
// CompositeImplicitAutograd kernels, which delegate device handling to the
// ops they redispatch to.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___cast_Byte(const at::Tensor & self, bool non_blocking) {
  // No device check
  // DeviceGuard omitted
  return at::native::_cast_Byte(self, non_blocking);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___cast_Char(const at::Tensor & self, bool non_blocking) {
  // No device check
  // DeviceGuard omitted
  return at::native::_cast_Char(self, non_blocking);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___cast_Double(const at::Tensor & self, bool non_blocking) {
  // No device check
  // DeviceGuard omitted
  return at::native::_cast_Double(self, non_blocking);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___cast_Float(const at::Tensor & self, bool non_blocking) {
  // No device check
  // DeviceGuard omitted
  return at::native::_cast_Float(self, non_blocking);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___cast_Int(const at::Tensor & self, bool non_blocking) {
  // No device check
  // DeviceGuard omitted
  return at::native::_cast_Int(self, non_blocking);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___cast_Long(const at::Tensor & self, bool non_blocking) {
  // No device check
  // DeviceGuard omitted
  return at::native::_cast_Long(self, non_blocking);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___cast_Short(const at::Tensor & self, bool non_blocking) {
  // No device check
  // DeviceGuard omitted
  return at::native::_cast_Short(self, non_blocking);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___cast_Half(const at::Tensor & self, bool non_blocking) {
  // No device check
  // DeviceGuard omitted
  return at::native::_cast_Half(self, non_blocking);
}
} // anonymous namespace
// Generated wrappers for autograd bookkeeping/introspection ops (_backward,
// set_data, data, is_leaf, output_nr, _version, requires_grad_, retain_grad,
// retains_grad, _unpack_dual). Each forwards verbatim to at::native.
namespace {
void wrapper_CompositeImplicitAutograd___backward(const at::Tensor & self, at::TensorList inputs, const c10::optional<at::Tensor> & gradient, c10::optional<bool> retain_graph, bool create_graph) {
  // No device check
  // DeviceGuard omitted
  return at::native::_backward(self, inputs, gradient, retain_graph, create_graph);
}
} // anonymous namespace
namespace {
void wrapper_CompositeImplicitAutograd__set_data(at::Tensor & self, const at::Tensor & new_data) {
  // No device check
  // DeviceGuard omitted
  return at::native::set_data(self, new_data);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__data(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::data(self);
}
} // anonymous namespace
namespace {
bool wrapper_CompositeImplicitAutograd__is_leaf(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::is_leaf(self);
}
} // anonymous namespace
namespace {
int64_t wrapper_CompositeImplicitAutograd__output_nr(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::output_nr(self);
}
} // anonymous namespace
namespace {
int64_t wrapper_CompositeImplicitAutograd___version(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::_version(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__requires_grad_(at::Tensor & self, bool requires_grad) {
  // No device check
  // DeviceGuard omitted
  return at::native::requires_grad_(self, requires_grad);
}
} // anonymous namespace
namespace {
void wrapper_CompositeImplicitAutograd__retain_grad(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::retain_grad(self);
}
} // anonymous namespace
namespace {
bool wrapper_CompositeImplicitAutograd__retains_grad(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::retains_grad(self);
}
} // anonymous namespace
namespace {
// Splits a forward-mode AD dual tensor at `level` into (primal, tangent).
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd___unpack_dual(const at::Tensor & dual, int64_t level) {
  // No device check
  // DeviceGuard omitted
  return at::native::_unpack_dual(dual, level);
}
} // anonymous namespace
// Generated wrappers for named-tensor ops (rename/align/refine_names) plus
// the _assert_tensor_metadata debug check. All forward verbatim to at::native.
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__rename_(at::Tensor & self, c10::optional<at::DimnameList> names) {
  // No device check
  // DeviceGuard omitted
  return at::native::rename_(self, names);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__rename(const at::Tensor & self, c10::optional<at::DimnameList> names) {
  // No device check
  // DeviceGuard omitted
  return at::native::rename(self, names);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__align_to(const at::Tensor & self, at::DimnameList names) {
  // No device check
  // DeviceGuard omitted
  return at::native::align_to(self, names);
}
} // anonymous namespace
namespace {
// Overload "ellipsis_idx": the at::native::align_to overload is selected by
// the extra int64_t argument.
at::Tensor wrapper_CompositeImplicitAutograd_ellipsis_idx_align_to(const at::Tensor & self, at::DimnameList order, int64_t ellipsis_idx) {
  // No device check
  // DeviceGuard omitted
  return at::native::align_to(self, order, ellipsis_idx);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__align_as(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::align_as(self, other);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd__align_tensors(at::TensorList tensors) {
  // No device check
  // DeviceGuard omitted
  return at::native::align_tensors(tensors);
}
} // anonymous namespace
namespace {
void wrapper_CompositeImplicitAutograd___assert_tensor_metadata(const at::Tensor & a, at::OptionalIntArrayRef size, at::OptionalIntArrayRef stride, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::_assert_tensor_metadata(a, size, stride, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__refine_names(const at::Tensor & self, at::DimnameList names) {
  // No device check
  // DeviceGuard omitted
  return at::native::refine_names(self, names);
}
} // anonymous namespace
// Generated wrappers for internal helpers: cuDNN RNN weight-flattening query,
// overlap debugging, Sobol quasi-random engine state ops, and tensor-driven
// reshape helpers. All forward verbatim to at::native.
namespace {
bool wrapper_CompositeImplicitAutograd___use_cudnn_rnn_flatten_weight() {
  // No device check
  // DeviceGuard omitted
  return at::native::_use_cudnn_rnn_flatten_weight();
}
} // anonymous namespace
namespace {
int64_t wrapper_CompositeImplicitAutograd___debug_has_internal_overlap(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::_debug_has_internal_overlap(self);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd___sobol_engine_draw(const at::Tensor & quasi, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sobol_engine_draw(quasi, n, sobolstate, dimension, num_generated, dtype);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd___sobol_engine_ff_(at::Tensor & self, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sobol_engine_ff_(self, n, sobolstate, dimension, num_generated);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd___sobol_engine_scramble_(at::Tensor & self, const at::Tensor & ltm, int64_t dimension) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sobol_engine_scramble_(self, ltm, dimension);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd___sobol_engine_initialize_state_(at::Tensor & self, int64_t dimension) {
  // No device check
  // DeviceGuard omitted
  return at::native::_sobol_engine_initialize_state_(self, dimension);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___reshape_from_tensor(const at::Tensor & self, const at::Tensor & shape) {
  // No device check
  // DeviceGuard omitted
  return at::native::_reshape_from_tensor(self, shape);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___shape_as_tensor(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::_shape_as_tensor(self);
}
} // anonymous namespace
// Generated wrappers for the dropout family: plain, feature, alpha, and
// feature_alpha variants, each with an out-of-place (const input) and an
// in-place (trailing underscore, mutable self) form. All forward verbatim
// to at::native with (p, train) passed through.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__dropout(const at::Tensor & input, double p, bool train) {
  // No device check
  // DeviceGuard omitted
  return at::native::dropout(input, p, train);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__dropout_(at::Tensor & self, double p, bool train) {
  // No device check
  // DeviceGuard omitted
  return at::native::dropout_(self, p, train);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__feature_dropout(const at::Tensor & input, double p, bool train) {
  // No device check
  // DeviceGuard omitted
  return at::native::feature_dropout(input, p, train);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__feature_dropout_(at::Tensor & self, double p, bool train) {
  // No device check
  // DeviceGuard omitted
  return at::native::feature_dropout_(self, p, train);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__alpha_dropout(const at::Tensor & input, double p, bool train) {
  // No device check
  // DeviceGuard omitted
  return at::native::alpha_dropout(input, p, train);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__alpha_dropout_(at::Tensor & self, double p, bool train) {
  // No device check
  // DeviceGuard omitted
  return at::native::alpha_dropout_(self, p, train);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__feature_alpha_dropout(const at::Tensor & input, double p, bool train) {
  // No device check
  // DeviceGuard omitted
  return at::native::feature_alpha_dropout(input, p, train);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__feature_alpha_dropout_(at::Tensor & self, double p, bool train) {
  // No device check
  // DeviceGuard omitted
  return at::native::feature_alpha_dropout_(self, p, train);
}
} // anonymous namespace
// Generated wrappers for the `absolute` alias family (absolute / out= /
// in-place), the chalf conversion, and complex-related ops (real, imag,
// conj, conj_physical, resolve_conj, resolve_neg). All forward verbatim
// to at::native.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__absolute(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::absolute(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_absolute_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::absolute_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__absolute_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::absolute_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__chalf(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
  // No device check
  // DeviceGuard omitted
  return at::native::chalf(self, memory_format);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__real(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::real(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__imag(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::imag(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__conj(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::conj(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__conj_physical(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::conj_physical(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__resolve_conj(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::resolve_conj(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__resolve_neg(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::resolve_neg(self);
}
} // anonymous namespace
// Generated wrappers for the `arccos` alias (functional / out= / in-place).
// All forward verbatim to at::native.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__arccos(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::arccos(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_arccos_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::arccos_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__arccos_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::arccos_(self);
}
} // anonymous namespace
// Generated wrappers for the 1-d pooling ops (avg_pool1d,
// adaptive_avg_pool1d, adaptive_max_pool1d) and
// affine_grid_generator_backward. All forward verbatim to at::native.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__avg_pool1d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad) {
  // No device check
  // DeviceGuard omitted
  return at::native::avg_pool1d(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__adaptive_avg_pool1d(const at::Tensor & self, at::IntArrayRef output_size) {
  // No device check
  // DeviceGuard omitted
  return at::native::adaptive_avg_pool1d(self, output_size);
}
} // anonymous namespace
namespace {
// Returns (values, indices), matching the native adaptive_max_pool1d.
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__adaptive_max_pool1d(const at::Tensor & self, at::IntArrayRef output_size) {
  // No device check
  // DeviceGuard omitted
  return at::native::adaptive_max_pool1d(self, output_size);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__affine_grid_generator_backward(const at::Tensor & grad, at::IntArrayRef size, bool align_corners) {
  // No device check
  // DeviceGuard omitted
  return at::native::affine_grid_generator_backward(grad, size, align_corners);
}
} // anonymous namespace
// Generated wrappers for _test_check_tensor and the Dimname overloads of
// all/any (functional and out=). All forward verbatim to at::native.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___test_check_tensor(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::_test_check_tensor(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dimname_all(const at::Tensor & self, at::Dimname dim, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::all(self, dim, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_dimname_out_all_out(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::all_out(self, dim, keepdim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dimname_any(const at::Tensor & self, at::Dimname dim, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::any(self, dim, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_dimname_out_any_out(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::any_out(self, dim, keepdim, out);
}
} // anonymous namespace
// Generated wrappers for _dim_arange and the remaining `arc*` aliases
// (arccosh, arcsinh, arctanh, arcsin, arctan), each in functional / out= /
// in-place form. All forward verbatim to at::native.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___dim_arange(const at::Tensor & like, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::_dim_arange(like, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__arccosh(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::arccosh(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_arccosh_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::arccosh_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__arccosh_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::arccosh_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__arcsinh(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::arcsinh(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_arcsinh_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::arcsinh_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__arcsinh_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::arcsinh_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__arctanh(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::arctanh(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_arctanh_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::arctanh_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__arctanh_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::arctanh_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__arcsin(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::arcsin(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_arcsin_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::arcsin_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__arcsin_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::arcsin_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__arctan(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::arctan(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_arctan_out(const at::Tensor & self, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::arctan_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__arctan_(at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::arctan_(self);
}
} // anonymous namespace
// Generated wrappers for atleast_1d/2d/3d, each with a single-Tensor
// overload and a "Sequence" (TensorList) overload. All forward verbatim
// to at::native; C++ overload resolution picks the matching native function.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__atleast_1d(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::atleast_1d(self);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_Sequence_atleast_1d(at::TensorList tensors) {
  // No device check
  // DeviceGuard omitted
  return at::native::atleast_1d(tensors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__atleast_2d(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::atleast_2d(self);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_Sequence_atleast_2d(at::TensorList tensors) {
  // No device check
  // DeviceGuard omitted
  return at::native::atleast_2d(tensors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__atleast_3d(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::atleast_3d(self);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_Sequence_atleast_3d(at::TensorList tensors) {
  // No device check
  // DeviceGuard omitted
  return at::native::atleast_3d(tensors);
}
} // anonymous namespace
// Generated wrappers for batch_norm and its impl-index variants, bilinear,
// and the broadcast helpers. All forward verbatim to at::native.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__batch_norm(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled) {
  // No device check
  // DeviceGuard omitted
  return at::native::batch_norm(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled);
}
} // anonymous namespace
namespace {
// Last tuple element is the backend impl index, consumed by
// _batch_norm_impl_index_backward below.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,int64_t> wrapper_CompositeImplicitAutograd___batch_norm_impl_index(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled) {
  // No device check
  // DeviceGuard omitted
  return at::native::_batch_norm_impl_index(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd___batch_norm_impl_index_backward(int64_t impl_index, const at::Tensor & input, const at::Tensor & grad_output, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var_transform, bool train, double eps, ::std::array<bool,3> output_mask, const at::Tensor & reservedSpace) {
  // No device check
  // DeviceGuard omitted
  return at::native::_batch_norm_impl_index_backward(impl_index, input, grad_output, weight, running_mean, running_var, save_mean, save_var_transform, train, eps, output_mask, reservedSpace);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__bilinear(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const c10::optional<at::Tensor> & bias) {
  // No device check
  // DeviceGuard omitted
  return at::native::bilinear(input1, input2, weight, bias);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd__broadcast_tensors(at::TensorList tensors) {
  // No device check
  // DeviceGuard omitted
  return at::native::broadcast_tensors(tensors);
}
} // anonymous namespace
namespace {
// Takes SymInt sizes, so it dispatches to the _symint native variant.
at::Tensor wrapper_CompositeImplicitAutograd__broadcast_to(const at::Tensor & self, c10::SymIntArrayRef size) {
  // No device check
  // DeviceGuard omitted
  return at::native::broadcast_to_symint(self, size);
}
} // anonymous namespace
// ---------------------------------------------------------------------------
// CompositeImplicitAutograd wrappers: cat / concat / concatenate (functional,
// out=, and named-dimension overloads) and chain_matmul. concat/concatenate
// forward to at::native::concat/concatenate, which appear to be aliases of
// cat — confirm in the native sources. @generated — do not edit by hand.
// ---------------------------------------------------------------------------
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_names_cat(at::TensorList tensors, at::Dimname dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::cat(tensors, dim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_names_out_cat_out(at::TensorList tensors, at::Dimname dim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::cat_out(tensors, dim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__concat(at::TensorList tensors, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::concat(tensors, dim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_concat_out(at::TensorList tensors, int64_t dim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::concat_out(tensors, dim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_names_concat(at::TensorList tensors, at::Dimname dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::concat(tensors, dim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_names_out_concat_out(at::TensorList tensors, at::Dimname dim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::concat_out(tensors, dim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__concatenate(at::TensorList tensors, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::concatenate(tensors, dim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_concatenate_out(at::TensorList tensors, int64_t dim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::concatenate_out(tensors, dim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_names_concatenate(at::TensorList tensors, at::Dimname dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::concatenate(tensors, dim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_names_out_concatenate_out(at::TensorList tensors, at::Dimname dim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::concatenate_out(tensors, dim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__chain_matmul(at::TensorList matrices) {
  // No device check
  // DeviceGuard omitted
  return at::native::chain_matmul(matrices);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_chain_matmul_out(at::TensorList matrices, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::chain_matmul_out(matrices, out);
}
} // anonymous namespace
// ---------------------------------------------------------------------------
// CompositeImplicitAutograd wrappers: chunk and the three tensor_split
// overloads (sections, explicit indices, tensor-valued indices-or-sections).
// SymInt-taking overloads dispatch to the *_symint native entry points.
// @generated — do not edit by hand.
// ---------------------------------------------------------------------------
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd__unsafe_chunk(const at::Tensor & self, int64_t chunks, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::unsafe_chunk(self, chunks, dim);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd__chunk(const at::Tensor & self, int64_t chunks, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::chunk(self, chunks, dim);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_sections_tensor_split(const at::Tensor & self, c10::SymInt sections, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::tensor_split_sections_symint(self, sections, dim);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_indices_tensor_split(const at::Tensor & self, c10::SymIntArrayRef indices, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::tensor_split_indices_symint(self, indices, dim);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_tensor_indices_or_sections_tensor_split(const at::Tensor & self, const at::Tensor & tensor_indices_or_sections, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::tensor_split(self, tensor_indices_or_sections, dim);
}
} // anonymous namespace
// ---------------------------------------------------------------------------
// CompositeImplicitAutograd wrappers: clip (Scalar and Tensor bounds, each in
// functional / out= / in-place flavors), plus cudnn_is_acceptable and
// contiguous. @generated — do not edit by hand.
// ---------------------------------------------------------------------------
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__clip(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
  // No device check
  // DeviceGuard omitted
  return at::native::clip(self, min, max);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_clip_out(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::clip_out(self, min, max, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__clip_(at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
  // No device check
  // DeviceGuard omitted
  return at::native::clip_(self, min, max);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_clip(const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
  // No device check
  // DeviceGuard omitted
  return at::native::clip(self, min, max);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_out_clip_out(const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::clip_out(self, min, max, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_clip_(at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
  // No device check
  // DeviceGuard omitted
  return at::native::clip_(self, min, max);
}
} // anonymous namespace
namespace {
bool wrapper_CompositeImplicitAutograd__cudnn_is_acceptable(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::cudnn_is_acceptable(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__contiguous(const at::Tensor & self, at::MemoryFormat memory_format) {
  // No device check
  // DeviceGuard omitted
  return at::native::contiguous(self, memory_format);
}
} // anonymous namespace
// ---------------------------------------------------------------------------
// CompositeImplicitAutograd wrappers: the convolution family (_convolution,
// conv1d/2d/3d with integer or string padding, conv_tbc_backward, and the
// conv_transpose variants). @generated — do not edit by hand.
// ---------------------------------------------------------------------------
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_deprecated__convolution(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled) {
  // No device check
  // DeviceGuard omitted
  return at::native::_convolution(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___convolution_mode(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
  // No device check
  // DeviceGuard omitted
  return at::native::_convolution_mode(input, weight, bias, stride, padding, dilation, groups);
}
} // anonymous namespace
namespace {
// Schema carries SymInt padding but the native kernel takes IntArrayRef, so
// the SymInt arguments are materialized via C10_AS_INTARRAYREF_SLOW below.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd___convolution_double_backward(const c10::optional<at::Tensor> & ggI, const c10::optional<at::Tensor> & ggW, const c10::optional<at::Tensor> & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
  // No device check
  // DeviceGuard omitted
  return at::native::_convolution_double_backward(ggI, ggW, ggb, gO, weight, self, stride, C10_AS_INTARRAYREF_SLOW(padding), dilation, transposed, C10_AS_INTARRAYREF_SLOW(output_padding), groups, output_mask);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__conv1d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
  // No device check
  // DeviceGuard omitted
  return at::native::conv1d(input, weight, bias, stride, padding, dilation, groups);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__conv2d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
  // No device check
  // DeviceGuard omitted
  return at::native::conv2d(input, weight, bias, stride, padding, dilation, groups);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__conv3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
  // No device check
  // DeviceGuard omitted
  return at::native::conv3d(input, weight, bias, stride, padding, dilation, groups);
}
} // anonymous namespace
namespace {
// "padding" overload: padding given as a string (e.g. "same"/"valid").
at::Tensor wrapper_CompositeImplicitAutograd_padding_conv1d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
  // No device check
  // DeviceGuard omitted
  return at::native::conv1d(input, weight, bias, stride, padding, dilation, groups);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_padding_conv2d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
  // No device check
  // DeviceGuard omitted
  return at::native::conv2d(input, weight, bias, stride, padding, dilation, groups);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_padding_conv3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
  // No device check
  // DeviceGuard omitted
  return at::native::conv3d(input, weight, bias, stride, padding, dilation, groups);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__conv_tbc_backward(const at::Tensor & self, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) {
  // No device check
  // DeviceGuard omitted
  return at::native::conv_tbc_backward(self, input, weight, bias, pad);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__conv_transpose1d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation) {
  // No device check
  // DeviceGuard omitted
  return at::native::conv_transpose1d(input, weight, bias, stride, padding, output_padding, groups, dilation);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_input_conv_transpose2d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation) {
  // No device check
  // DeviceGuard omitted
  return at::native::conv_transpose2d(input, weight, bias, stride, padding, output_padding, groups, dilation);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_input_conv_transpose3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation) {
  // No device check
  // DeviceGuard omitted
  return at::native::conv_transpose3d(input, weight, bias, stride, padding, output_padding, groups, dilation);
}
} // anonymous namespace
// ---------------------------------------------------------------------------
// CompositeImplicitAutograd wrappers: cosine_embedding_loss, cov, corrcoef.
// @generated — do not edit by hand.
// ---------------------------------------------------------------------------
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__cosine_embedding_loss(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) {
  // No device check
  // DeviceGuard omitted
  return at::native::cosine_embedding_loss(input1, input2, target, margin, reduction);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__cov(const at::Tensor & self, int64_t correction, const c10::optional<at::Tensor> & fweights, const c10::optional<at::Tensor> & aweights) {
  // No device check
  // DeviceGuard omitted
  return at::native::cov(self, correction, fweights, aweights);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__corrcoef(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::corrcoef(self);
}
} // anonymous namespace
// ---------------------------------------------------------------------------
// CompositeImplicitAutograd wrappers: named-dimension (Dimname) overloads of
// cummax / cummin / cumprod / cumsum, their backward helpers, and the
// cumulative_trapezoid overloads. @generated — do not edit by hand.
// ---------------------------------------------------------------------------
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_dimname_cummax(const at::Tensor & self, at::Dimname dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::cummax(self, dim);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_dimname_out_cummax_out(const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices) {
  // No device check
  // DeviceGuard omitted
  return at::native::cummax_out(self, dim, values, indices);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_dimname_cummin(const at::Tensor & self, at::Dimname dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::cummin(self, dim);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_dimname_out_cummin_out(const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices) {
  // No device check
  // DeviceGuard omitted
  return at::native::cummin_out(self, dim, values, indices);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__cummaxmin_backward(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & indices, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::cummaxmin_backward(grad, input, indices, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dimname_cumprod(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::cumprod(self, dim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_dimname_out_cumprod_out(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::cumprod_out(self, dim, dtype, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_dimname_cumprod_(at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::cumprod_(self, dim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__cumprod_backward(const at::Tensor & grad, const at::Tensor & input, int64_t dim, const at::Tensor & output) {
  // No device check
  // DeviceGuard omitted
  return at::native::cumprod_backward(grad, input, dim, output);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dimname_cumsum(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::cumsum(self, dim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_dimname_out_cumsum_out(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::cumsum_out(self, dim, dtype, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_dimname_cumsum_(at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::cumsum_(self, dim, dtype);
}
} // anonymous namespace
namespace {
// "x" overload: sample coordinates supplied as a tensor.
at::Tensor wrapper_CompositeImplicitAutograd_x_cumulative_trapezoid(const at::Tensor & y, const at::Tensor & x, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::cumulative_trapezoid(y, x, dim);
}
} // anonymous namespace
namespace {
// "dx" overload: uniform scalar spacing between samples.
at::Tensor wrapper_CompositeImplicitAutograd_dx_cumulative_trapezoid(const at::Tensor & y, const at::Scalar & dx, int64_t dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::cumulative_trapezoid(y, dx, dim);
}
} // anonymous namespace
// ---------------------------------------------------------------------------
// CompositeImplicitAutograd wrappers: ctc_loss (IntList / Tensor length
// overloads), diagflat, linalg_diagonal, named-dim diagonal, and
// fill_diagonal_. @generated — do not edit by hand.
// ---------------------------------------------------------------------------
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_IntList_ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) {
  // No device check
  // DeviceGuard omitted
  return at::native::ctc_loss(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) {
  // No device check
  // DeviceGuard omitted
  return at::native::ctc_loss(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__diagflat(const at::Tensor & self, int64_t offset) {
  // No device check
  // DeviceGuard omitted
  return at::native::diagflat(self, offset);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_diagonal(const at::Tensor & A, int64_t offset, int64_t dim1, int64_t dim2) {
  // No device check
  // DeviceGuard omitted
  return at::native::linalg_diagonal(A, offset, dim1, dim2);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Dimname_diagonal(const at::Tensor & self, at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset) {
  // No device check
  // DeviceGuard omitted
  return at::native::diagonal(self, outdim, dim1, dim2, offset);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__fill_diagonal_(at::Tensor & self, const at::Scalar & fill_value, bool wrap) {
  // No device check
  // DeviceGuard omitted
  return at::native::fill_diagonal_(self, fill_value, wrap);
}
} // anonymous namespace
// ---------------------------------------------------------------------------
// CompositeImplicitAutograd wrappers: diff / diff.out and the seven overloads
// of gradient (spacing as optional scalar, scalar, scalar array or tensor
// list, crossed with an optional single dim or a dim array).
// @generated — do not edit by hand.
// ---------------------------------------------------------------------------
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__diff(const at::Tensor & self, int64_t n, int64_t dim, const c10::optional<at::Tensor> & prepend, const c10::optional<at::Tensor> & append) {
  // No device check
  // DeviceGuard omitted
  return at::native::diff(self, n, dim, prepend, append);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_diff_out(const at::Tensor & self, int64_t n, int64_t dim, const c10::optional<at::Tensor> & prepend, const c10::optional<at::Tensor> & append, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::diff_out(self, n, dim, prepend, append, out);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_scalarint_gradient(const at::Tensor & self, const c10::optional<at::Scalar> & spacing, c10::optional<int64_t> dim, int64_t edge_order) {
  // No device check
  // DeviceGuard omitted
  return at::native::gradient(self, spacing, dim, edge_order);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_scalararray_gradient(const at::Tensor & self, const at::Scalar & spacing, at::IntArrayRef dim, int64_t edge_order) {
  // No device check
  // DeviceGuard omitted
  return at::native::gradient(self, spacing, dim, edge_order);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_array_gradient(const at::Tensor & self, at::IntArrayRef dim, int64_t edge_order) {
  // No device check
  // DeviceGuard omitted
  return at::native::gradient(self, dim, edge_order);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_scalarrayint_gradient(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, c10::optional<int64_t> dim, int64_t edge_order) {
  // No device check
  // DeviceGuard omitted
  return at::native::gradient(self, spacing, dim, edge_order);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_scalarrayarray_gradient(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, at::IntArrayRef dim, int64_t edge_order) {
  // No device check
  // DeviceGuard omitted
  return at::native::gradient(self, spacing, dim, edge_order);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_tensorarrayint_gradient(const at::Tensor & self, at::TensorList spacing, c10::optional<int64_t> dim, int64_t edge_order) {
  // No device check
  // DeviceGuard omitted
  return at::native::gradient(self, spacing, dim, edge_order);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_tensorarray_gradient(const at::Tensor & self, at::TensorList spacing, at::IntArrayRef dim, int64_t edge_order) {
  // No device check
  // DeviceGuard omitted
  return at::native::gradient(self, spacing, dim, edge_order);
}
} // anonymous namespace
// ---------------------------------------------------------------------------
// CompositeImplicitAutograd wrappers: divide / true_divide (Tensor and Scalar
// operands; functional, out=, in-place, and rounding-mode variants).
// @generated — do not edit by hand.
// ---------------------------------------------------------------------------
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_divide(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::divide(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_divide_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::divide_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_divide_(at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::divide_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Scalar_divide(const at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::divide(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_divide_(at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::divide_(self, other);
}
} // anonymous namespace
namespace {
// rounding_mode selects truncation/floor behavior; nullopt means true division.
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_mode_divide(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
  // No device check
  // DeviceGuard omitted
  return at::native::divide(self, other, rounding_mode);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_mode_divide_out(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::divide_out(self, other, rounding_mode, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_mode_divide_(at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
  // No device check
  // DeviceGuard omitted
  return at::native::divide_(self, other, rounding_mode);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Scalar_mode_divide(const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
  // No device check
  // DeviceGuard omitted
  return at::native::divide(self, other, rounding_mode);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_mode_divide_(at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
  // No device check
  // DeviceGuard omitted
  return at::native::divide_(self, other, rounding_mode);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_true_divide(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::true_divide(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_true_divide_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::true_divide_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_true_divide_(at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::true_divide_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Scalar_true_divide(const at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::true_divide(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_true_divide_(at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::true_divide_(self, other);
}
} // anonymous namespace
// Codegen'd CompositeImplicitAutograd wrappers: einsum plus the
// embedding/embedding_bag family. Pure pass-throughs to at::native; note the
// *_symint variants forward c10::SymInt arguments to symbolic-int-aware
// native kernels.
namespace {
// einsum: equation-driven contraction over `tensors`; `path` optionally
// overrides the contraction order.
at::Tensor wrapper_CompositeImplicitAutograd__einsum(c10::string_view equation, at::TensorList tensors, at::OptionalIntArrayRef path) {
  // No device check
  // DeviceGuard omitted
  return at::native::einsum(equation, tensors, path);
}
} // anonymous namespace
namespace {
// embedding_backward: SymInt-capable backward; dispatches to the symint
// native entry point.
at::Tensor wrapper_CompositeImplicitAutograd__embedding_backward(const at::Tensor & grad, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) {
  // No device check
  // DeviceGuard omitted
  return at::native::embedding_backward_symint(grad, indices, num_weights, padding_idx, scale_grad_by_freq, sparse);
}
} // anonymous namespace
namespace {
// embedding_sparse_backward: sparse-gradient backward for embedding.
at::Tensor wrapper_CompositeImplicitAutograd__embedding_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
  // No device check
  // DeviceGuard omitted
  return at::native::embedding_sparse_backward(grad, indices, num_weights, padding_idx, scale_grad_by_freq);
}
} // anonymous namespace
namespace {
// _rowwise_prune: returns (pruned weights, compressed index mapping).
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd___rowwise_prune(const at::Tensor & weight, const at::Tensor & mask, at::ScalarType compressed_indices_dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::_rowwise_prune(weight, mask, compressed_indices_dtype);
}
} // anonymous namespace
namespace {
// row_stack: alias-style stacking of tensors along dim 0.
at::Tensor wrapper_CompositeImplicitAutograd__row_stack(at::TensorList tensors) {
  // No device check
  // DeviceGuard omitted
  return at::native::row_stack(tensors);
}
} // anonymous namespace
namespace {
// row_stack.out: out-variant writing into `out`.
at::Tensor & wrapper_CompositeImplicitAutograd_out_row_stack_out(at::TensorList tensors, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::row_stack_out(tensors, out);
}
} // anonymous namespace
namespace {
// embedding_bag: returns (output, offset2bag, bag_size, max_indices).
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__embedding_bag(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset) {
  // No device check
  // DeviceGuard omitted
  return at::native::embedding_bag(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset);
}
} // anonymous namespace
namespace {
// embedding_bag.padding_idx: overload adding an optional padding index.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_padding_idx_embedding_bag(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, c10::optional<int64_t> padding_idx) {
  // No device check
  // DeviceGuard omitted
  return at::native::embedding_bag(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
}
} // anonymous namespace
namespace {
// _embedding_bag_backward: SymInt-capable backward for embedding_bag.
at::Tensor wrapper_CompositeImplicitAutograd___embedding_bag_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
  // No device check
  // DeviceGuard omitted
  return at::native::_embedding_bag_backward_symint(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx);
}
} // anonymous namespace
namespace {
// _embedding_bag_sparse_backward: sparse-gradient backward (SymInt entry).
at::Tensor wrapper_CompositeImplicitAutograd___embedding_bag_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
  // No device check
  // DeviceGuard omitted
  return at::native::_embedding_bag_sparse_backward_symint(grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
}
} // anonymous namespace
// Codegen'd CompositeImplicitAutograd wrappers: empty.out, expand_as, and the
// flatten/unflatten overload set (int-dim and named-dim variants).
namespace {
// empty.out: SymInt sizes are materialized via C10_AS_INTARRAYREF_SLOW
// because the native out-kernel takes a concrete IntArrayRef.
at::Tensor & wrapper_CompositeImplicitAutograd_out_empty_out(c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::empty_out(C10_AS_INTARRAYREF_SLOW(size), memory_format, out);
}
} // anonymous namespace
namespace {
// expand_as: expand `self` to the shape of `other`.
at::Tensor wrapper_CompositeImplicitAutograd__expand_as(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::expand_as(self, other);
}
} // anonymous namespace
namespace {
// flatten.using_ints: flatten dims [start_dim, end_dim] by integer index.
at::Tensor wrapper_CompositeImplicitAutograd_using_ints_flatten(const at::Tensor & self, int64_t start_dim, int64_t end_dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::flatten(self, start_dim, end_dim);
}
} // anonymous namespace
namespace {
// flatten.named_out_dim: integer dim range, named output dimension.
at::Tensor wrapper_CompositeImplicitAutograd_named_out_dim_flatten(const at::Tensor & self, int64_t start_dim, int64_t end_dim, at::Dimname out_dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::flatten(self, start_dim, end_dim, out_dim);
}
} // anonymous namespace
namespace {
// flatten.using_names: both endpoints given as Dimnames.
at::Tensor wrapper_CompositeImplicitAutograd_using_names_flatten(const at::Tensor & self, at::Dimname start_dim, at::Dimname end_dim, at::Dimname out_dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::flatten(self, start_dim, end_dim, out_dim);
}
} // anonymous namespace
namespace {
// flatten.DimnameList: flatten an explicit list of named dims.
at::Tensor wrapper_CompositeImplicitAutograd_DimnameList_flatten(const at::Tensor & self, at::DimnameList dims, at::Dimname out_dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::flatten(self, dims, out_dim);
}
} // anonymous namespace
namespace {
// unflatten.int: split integer dim `dim` into the given `sizes`.
at::Tensor wrapper_CompositeImplicitAutograd_int_unflatten(const at::Tensor & self, int64_t dim, at::IntArrayRef sizes) {
  // No device check
  // DeviceGuard omitted
  return at::native::unflatten(self, dim, sizes);
}
} // anonymous namespace
namespace {
// unflatten.Dimname: named-dim variant that also assigns new dim names.
at::Tensor wrapper_CompositeImplicitAutograd_Dimname_unflatten(const at::Tensor & self, at::Dimname dim, at::IntArrayRef sizes, at::DimnameList names) {
  // No device check
  // DeviceGuard omitted
  return at::native::unflatten(self, dim, sizes, names);
}
} // anonymous namespace
// Codegen'd CompositeImplicitAutograd wrappers: floor_divide (Scalar),
// grid_sampler, two loss ops, group_norm, and the cuFFT plan-cache controls.
namespace {
// floor_divide.Scalar: tensor-by-scalar floor division.
at::Tensor wrapper_CompositeImplicitAutograd_Scalar_floor_divide(const at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::floor_divide(self, other);
}
} // anonymous namespace
namespace {
// floor_divide_.Scalar: in-place variant; returns `self`.
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_floor_divide_(at::Tensor & self, const at::Scalar & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::floor_divide_(self, other);
}
} // anonymous namespace
namespace {
// grid_sampler: samples `input` at `grid` locations; mode/padding are the
// integer enums defined by the ATen op schema.
at::Tensor wrapper_CompositeImplicitAutograd__grid_sampler(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
  // No device check
  // DeviceGuard omitted
  return at::native::grid_sampler(input, grid, interpolation_mode, padding_mode, align_corners);
}
} // anonymous namespace
namespace {
// _grid_sampler_2d_cpu_fallback_backward: returns (grad_input, grad_grid).
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd___grid_sampler_2d_cpu_fallback_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
  // No device check
  // DeviceGuard omitted
  return at::native::_grid_sampler_2d_cpu_fallback_backward(grad_output, input, grid, interpolation_mode, padding_mode, align_corners);
}
} // anonymous namespace
namespace {
// hinge_embedding_loss: `reduction` is the standard loss-reduction enum.
at::Tensor wrapper_CompositeImplicitAutograd__hinge_embedding_loss(const at::Tensor & self, const at::Tensor & target, double margin, int64_t reduction) {
  // No device check
  // DeviceGuard omitted
  return at::native::hinge_embedding_loss(self, target, margin, reduction);
}
} // anonymous namespace
namespace {
// group_norm: normalization over `num_groups` channel groups with optional
// affine weight/bias.
at::Tensor wrapper_CompositeImplicitAutograd__group_norm(const at::Tensor & input, int64_t num_groups, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, bool cudnn_enabled) {
  // No device check
  // DeviceGuard omitted
  return at::native::group_norm(input, num_groups, weight, bias, eps, cudnn_enabled);
}
} // anonymous namespace
namespace {
// _cufft_get_plan_cache_size: current entry count for a device's plan cache.
int64_t wrapper_CompositeImplicitAutograd___cufft_get_plan_cache_size(int64_t device_index) {
  // No device check
  // DeviceGuard omitted
  return at::native::_cufft_get_plan_cache_size(device_index);
}
} // anonymous namespace
namespace {
// _cufft_get_plan_cache_max_size: capacity limit of the plan cache.
int64_t wrapper_CompositeImplicitAutograd___cufft_get_plan_cache_max_size(int64_t device_index) {
  // No device check
  // DeviceGuard omitted
  return at::native::_cufft_get_plan_cache_max_size(device_index);
}
} // anonymous namespace
namespace {
// _cufft_set_plan_cache_max_size: void setter; `return` of a void call is
// valid C++ and is how the codegen emits uniform bodies.
void wrapper_CompositeImplicitAutograd___cufft_set_plan_cache_max_size(int64_t device_index, int64_t max_size) {
  // No device check
  // DeviceGuard omitted
  return at::native::_cufft_set_plan_cache_max_size(device_index, max_size);
}
} // anonymous namespace
namespace {
// _cufft_clear_plan_cache: drops all cached plans for the device.
void wrapper_CompositeImplicitAutograd___cufft_clear_plan_cache(int64_t device_index) {
  // No device check
  // DeviceGuard omitted
  return at::native::_cufft_clear_plan_cache(device_index);
}
} // anonymous namespace
// Codegen'd CompositeImplicitAutograd wrappers: dimname index_copy,
// instance_norm, isclose, and the family of boolean tensor predicates
// (is_floating_point, is_complex, is_conj, ...). All forward verbatim to
// at::native.
namespace {
// index_copy_.dimname: in-place copy of `source` rows into `self` at
// `index` along a named dim.
at::Tensor & wrapper_CompositeImplicitAutograd_dimname_index_copy_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) {
  // No device check
  // DeviceGuard omitted
  return at::native::index_copy_(self, dim, index, source);
}
} // anonymous namespace
namespace {
// index_copy.dimname: out-of-place variant.
at::Tensor wrapper_CompositeImplicitAutograd_dimname_index_copy(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) {
  // No device check
  // DeviceGuard omitted
  return at::native::index_copy(self, dim, index, source);
}
} // anonymous namespace
namespace {
// instance_norm: per-instance normalization with optional running stats.
at::Tensor wrapper_CompositeImplicitAutograd__instance_norm(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) {
  // No device check
  // DeviceGuard omitted
  return at::native::instance_norm(input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled);
}
} // anonymous namespace
namespace {
// isclose: elementwise closeness test with rtol/atol tolerances.
at::Tensor wrapper_CompositeImplicitAutograd__isclose(const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan) {
  // No device check
  // DeviceGuard omitted
  return at::native::isclose(self, other, rtol, atol, equal_nan);
}
} // anonymous namespace
namespace {
// is_distributed: scalar bool property query.
bool wrapper_CompositeImplicitAutograd__is_distributed(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::is_distributed(self);
}
} // anonymous namespace
namespace {
// is_floating_point: true for float dtypes.
bool wrapper_CompositeImplicitAutograd__is_floating_point(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::is_floating_point(self);
}
} // anonymous namespace
namespace {
// is_complex: true for complex dtypes.
bool wrapper_CompositeImplicitAutograd__is_complex(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::is_complex(self);
}
} // anonymous namespace
namespace {
// is_conj: true if the conjugate bit is set.
bool wrapper_CompositeImplicitAutograd__is_conj(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::is_conj(self);
}
} // anonymous namespace
namespace {
// _is_zerotensor: true for the internal zero-tensor representation.
bool wrapper_CompositeImplicitAutograd___is_zerotensor(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::_is_zerotensor(self);
}
} // anonymous namespace
namespace {
// is_neg: true if the negative bit is set.
bool wrapper_CompositeImplicitAutograd__is_neg(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::is_neg(self);
}
} // anonymous namespace
namespace {
// isreal: elementwise real-valued test; returns a bool tensor.
at::Tensor wrapper_CompositeImplicitAutograd__isreal(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::isreal(self);
}
} // anonymous namespace
namespace {
// is_nonzero: scalar truthiness of a single-element tensor.
bool wrapper_CompositeImplicitAutograd__is_nonzero(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::is_nonzero(self);
}
} // anonymous namespace
namespace {
// is_signed: true for signed dtypes.
bool wrapper_CompositeImplicitAutograd__is_signed(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::is_signed(self);
}
} // anonymous namespace
namespace {
// is_inference: true for inference-mode tensors.
bool wrapper_CompositeImplicitAutograd__is_inference(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::is_inference(self);
}
} // anonymous namespace
// Codegen'd CompositeImplicitAutograd wrappers: kl_div, kron, dimname
// kthvalue, layer_norm (SymInt), linear, and the first fbgemm quantized op.
namespace {
// kl_div: KL-divergence loss; `reduction` is the standard enum.
at::Tensor wrapper_CompositeImplicitAutograd__kl_div(const at::Tensor & self, const at::Tensor & target, int64_t reduction, bool log_target) {
  // No device check
  // DeviceGuard omitted
  return at::native::kl_div(self, target, reduction, log_target);
}
} // anonymous namespace
namespace {
// kron: Kronecker product of two tensors.
at::Tensor wrapper_CompositeImplicitAutograd__kron(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::kron(self, other);
}
} // anonymous namespace
namespace {
// kron.out: out-variant writing into `out`.
at::Tensor & wrapper_CompositeImplicitAutograd_out_kron_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::kron_out(self, other, out);
}
} // anonymous namespace
namespace {
// kthvalue.dimname: returns (values, indices) along a named dim.
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_dimname_kthvalue(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::kthvalue(self, k, dim, keepdim);
}
} // anonymous namespace
namespace {
// kthvalue.dimname_out: out-variant filling `values` and `indices`.
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_dimname_out_kthvalue_out(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
  // No device check
  // DeviceGuard omitted
  return at::native::kthvalue_out(self, k, dim, keepdim, values, indices);
}
} // anonymous namespace
namespace {
// layer_norm: SymInt-shaped normalized dims; dispatches to the symint
// native entry point.
at::Tensor wrapper_CompositeImplicitAutograd__layer_norm(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, bool cudnn_enable) {
  // No device check
  // DeviceGuard omitted
  return at::native::layer_norm_symint(input, normalized_shape, weight, bias, eps, cudnn_enable);
}
} // anonymous namespace
namespace {
// linear: input @ weight^T + bias with optional bias.
at::Tensor wrapper_CompositeImplicitAutograd__linear(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias) {
  // No device check
  // DeviceGuard omitted
  return at::native::linear(input, weight, bias);
}
} // anonymous namespace
namespace {
// fbgemm_linear_int8_weight_fp32_activation: quantized linear with fp32
// activations and int8 packed weights.
at::Tensor wrapper_CompositeImplicitAutograd__fbgemm_linear_int8_weight_fp32_activation(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) {
  // No device check
  // DeviceGuard omitted
  return at::native::fbgemm_linear_int8_weight_fp32_activation(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
}
} // anonymous namespace
// Codegen'd CompositeImplicitAutograd wrappers: the remaining fbgemm
// quantization helpers and the ldexp family.
namespace {
// fbgemm_linear_int8_weight: int8-weight linear (legacy entry point).
at::Tensor wrapper_CompositeImplicitAutograd__fbgemm_linear_int8_weight(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) {
  // No device check
  // DeviceGuard omitted
  return at::native::fbgemm_linear_int8_weight(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
}
} // anonymous namespace
namespace {
// fbgemm_linear_quantize_weight: returns (quantized, col_offsets, scale,
// zero_point).
::std::tuple<at::Tensor,at::Tensor,double,int64_t> wrapper_CompositeImplicitAutograd__fbgemm_linear_quantize_weight(const at::Tensor & input) {
  // No device check
  // DeviceGuard omitted
  return at::native::fbgemm_linear_quantize_weight(input);
}
} // anonymous namespace
namespace {
// fbgemm_pack_gemm_matrix_fp16: packs an fp16 GEMM weight matrix.
at::Tensor wrapper_CompositeImplicitAutograd__fbgemm_pack_gemm_matrix_fp16(const at::Tensor & input) {
  // No device check
  // DeviceGuard omitted
  return at::native::fbgemm_pack_gemm_matrix_fp16(input);
}
} // anonymous namespace
namespace {
// fbgemm_linear_fp16_weight_fp32_activation: fp16-weight linear.
at::Tensor wrapper_CompositeImplicitAutograd__fbgemm_linear_fp16_weight_fp32_activation(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) {
  // No device check
  // DeviceGuard omitted
  return at::native::fbgemm_linear_fp16_weight_fp32_activation(input, packed_weight, bias);
}
} // anonymous namespace
namespace {
// fbgemm_linear_fp16_weight: legacy fp16-weight linear entry point.
at::Tensor wrapper_CompositeImplicitAutograd__fbgemm_linear_fp16_weight(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) {
  // No device check
  // DeviceGuard omitted
  return at::native::fbgemm_linear_fp16_weight(input, packed_weight, bias);
}
} // anonymous namespace
namespace {
// fbgemm_pack_quantized_matrix: pack using the tensor's own dimensions.
at::Tensor wrapper_CompositeImplicitAutograd__fbgemm_pack_quantized_matrix(const at::Tensor & input) {
  // No device check
  // DeviceGuard omitted
  return at::native::fbgemm_pack_quantized_matrix(input);
}
} // anonymous namespace
namespace {
// fbgemm_pack_quantized_matrix.KN: pack with explicit K/N dimensions.
at::Tensor wrapper_CompositeImplicitAutograd_KN_fbgemm_pack_quantized_matrix(const at::Tensor & input, int64_t K, int64_t N) {
  // No device check
  // DeviceGuard omitted
  return at::native::fbgemm_pack_quantized_matrix(input, K, N);
}
} // anonymous namespace
namespace {
// ldexp.Tensor: elementwise self * 2^other.
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_ldexp(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::ldexp(self, other);
}
} // anonymous namespace
namespace {
// ldexp.out: out-variant writing into `out`.
at::Tensor & wrapper_CompositeImplicitAutograd_out_ldexp_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::ldexp_out(self, other, out);
}
} // anonymous namespace
namespace {
// ldexp_: in-place variant; returns `self`.
at::Tensor & wrapper_CompositeImplicitAutograd__ldexp_(at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::ldexp_(self, other);
}
} // anonymous namespace
// Codegen'd CompositeImplicitAutograd wrappers: log_softmax overloads,
// dimname logcumsumexp, named logsumexp, margin_ranking_loss, and matmul.
namespace {
// log_softmax.int: integer-dim overload with optional output dtype.
at::Tensor wrapper_CompositeImplicitAutograd_int_log_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::log_softmax(self, dim, dtype);
}
} // anonymous namespace
namespace {
// log_softmax.Dimname: named-dim overload.
at::Tensor wrapper_CompositeImplicitAutograd_Dimname_log_softmax(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
  // No device check
  // DeviceGuard omitted
  return at::native::log_softmax(self, dim, dtype);
}
} // anonymous namespace
namespace {
// logcumsumexp.dimname: cumulative log-sum-exp along a named dim.
at::Tensor wrapper_CompositeImplicitAutograd_dimname_logcumsumexp(const at::Tensor & self, at::Dimname dim) {
  // No device check
  // DeviceGuard omitted
  return at::native::logcumsumexp(self, dim);
}
} // anonymous namespace
namespace {
// logcumsumexp.dimname_out: out-variant writing into `out`.
at::Tensor & wrapper_CompositeImplicitAutograd_dimname_out_logcumsumexp_out(const at::Tensor & self, at::Dimname dim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::logcumsumexp_out(self, dim, out);
}
} // anonymous namespace
namespace {
// logsumexp.names: reduction over a list of named dims.
at::Tensor wrapper_CompositeImplicitAutograd_names_logsumexp(const at::Tensor & self, at::DimnameList dim, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::logsumexp(self, dim, keepdim);
}
} // anonymous namespace
namespace {
// logsumexp.names_out: out-variant of the named reduction.
at::Tensor & wrapper_CompositeImplicitAutograd_names_out_logsumexp_out(const at::Tensor & self, at::DimnameList dim, bool keepdim, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::logsumexp_out(self, dim, keepdim, out);
}
} // anonymous namespace
namespace {
// margin_ranking_loss: pairwise ranking loss on (input1, input2, target).
at::Tensor wrapper_CompositeImplicitAutograd__margin_ranking_loss(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) {
  // No device check
  // DeviceGuard omitted
  return at::native::margin_ranking_loss(input1, input2, target, margin, reduction);
}
} // anonymous namespace
namespace {
// matmul: general matrix product with broadcasting over batch dims.
at::Tensor wrapper_CompositeImplicitAutograd__matmul(const at::Tensor & self, const at::Tensor & other) {
  // No device check
  // DeviceGuard omitted
  return at::native::matmul(self, other);
}
} // anonymous namespace
// Codegen'd CompositeImplicitAutograd wrappers: matmul.out, matrix_power /
// matrix_exp, named-dim max, value_selecting_reduction_backward (SymInt),
// and the max_pool1d/2d/3d convenience ops.
namespace {
// matmul.out: out-variant writing into `out`.
at::Tensor & wrapper_CompositeImplicitAutograd_out_matmul_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::matmul_out(self, other, out);
}
} // anonymous namespace
namespace {
// matrix_power: n-th matrix power of a square matrix (batch).
at::Tensor wrapper_CompositeImplicitAutograd__matrix_power(const at::Tensor & self, int64_t n) {
  // No device check
  // DeviceGuard omitted
  return at::native::matrix_power(self, n);
}
} // anonymous namespace
namespace {
// matrix_power.out: out-variant writing into `out`.
at::Tensor & wrapper_CompositeImplicitAutograd_out_matrix_power_out(const at::Tensor & self, int64_t n, at::Tensor & out) {
  // No device check
  // DeviceGuard omitted
  return at::native::matrix_power_out(self, n, out);
}
} // anonymous namespace
namespace {
// matrix_exp: matrix exponential.
at::Tensor wrapper_CompositeImplicitAutograd__matrix_exp(const at::Tensor & self) {
  // No device check
  // DeviceGuard omitted
  return at::native::matrix_exp(self);
}
} // anonymous namespace
namespace {
// matrix_exp_backward: gradient of matrix_exp.
at::Tensor wrapper_CompositeImplicitAutograd__matrix_exp_backward(const at::Tensor & self, const at::Tensor & grad) {
  // No device check
  // DeviceGuard omitted
  return at::native::matrix_exp_backward(self, grad);
}
} // anonymous namespace
namespace {
// max.names_dim: returns (max values, argmax indices) along a named dim.
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_names_dim_max(const at::Tensor & self, at::Dimname dim, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::max(self, dim, keepdim);
}
} // anonymous namespace
namespace {
// max.names_dim_max: out-variant filling `max` and `max_values`.
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_names_dim_max_max_out(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) {
  // No device check
  // DeviceGuard omitted
  return at::native::max_out(self, dim, keepdim, max, max_values);
}
} // anonymous namespace
namespace {
// value_selecting_reduction_backward: SymInt-shaped backward helper for
// index-selecting reductions (max/min/kthvalue-style ops).
at::Tensor wrapper_CompositeImplicitAutograd__value_selecting_reduction_backward(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, c10::SymIntArrayRef sizes, bool keepdim) {
  // No device check
  // DeviceGuard omitted
  return at::native::value_selecting_reduction_backward_symint(grad, dim, indices, sizes, keepdim);
}
} // anonymous namespace
namespace {
// max_pool1d_with_indices: returns (pooled, indices).
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__max_pool1d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
  // No device check
  // DeviceGuard omitted
  return at::native::max_pool1d_with_indices(self, kernel_size, stride, padding, dilation, ceil_mode);
}
} // anonymous namespace
namespace {
// max_pool1d: pooled output only.
at::Tensor wrapper_CompositeImplicitAutograd__max_pool1d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
  // No device check
  // DeviceGuard omitted
  return at::native::max_pool1d(self, kernel_size, stride, padding, dilation, ceil_mode);
}
} // anonymous namespace
namespace {
// max_pool2d: 2-D max pooling convenience wrapper.
at::Tensor wrapper_CompositeImplicitAutograd__max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
  // No device check
  // DeviceGuard omitted
  return at::native::max_pool2d(self, kernel_size, stride, padding, dilation, ceil_mode);
}
} // anonymous namespace
namespace {
// max_pool3d: 3-D max pooling convenience wrapper.
at::Tensor wrapper_CompositeImplicitAutograd__max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
  // No device check
  // DeviceGuard omitted
  return at::native::max_pool3d(self, kernel_size, stride, padding, dilation, ceil_mode);
}
} // anonymous namespace
2870 | namespace { |
2871 | at::Tensor wrapper_CompositeImplicitAutograd_names_dim_mean(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype) { |
2872 | // No device check |
2873 | // DeviceGuard omitted |
2874 | return at::native::mean(self, dim, keepdim, dtype); |
2875 | } |
2876 | } // anonymous namespace |
2877 | namespace { |
2878 | at::Tensor & wrapper_CompositeImplicitAutograd_names_out_mean_out(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) { |
2879 | // No device check |
2880 | // DeviceGuard omitted |
2881 | return at::native::mean_out(self, dim, keepdim, dtype, out); |
2882 | } |
2883 | } // anonymous namespace |
// NOTE(review): torchgen-generated CompositeImplicitAutograd wrappers; each
// forwards its arguments unchanged to the matching at::native kernel. The
// "No device check" / "DeviceGuard omitted" comments are emitted by the
// generator for this dispatch key. Regenerate via torchgen; do not hand-edit.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__nanmean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
// No device check
// DeviceGuard omitted
return at::native::nanmean(self, dim, keepdim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_nanmean_out(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::nanmean_out(self, dim, keepdim, dtype, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_names_dim_median(const at::Tensor & self, at::Dimname dim, bool keepdim) {
// No device check
// DeviceGuard omitted
return at::native::median(self, dim, keepdim);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_names_dim_values_median_out(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
// No device check
// DeviceGuard omitted
return at::native::median_out(self, dim, keepdim, values, indices);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_names_dim_nanmedian(const at::Tensor & self, at::Dimname dim, bool keepdim) {
// No device check
// DeviceGuard omitted
return at::native::nanmedian(self, dim, keepdim);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_names_dim_values_nanmedian_out(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
// No device check
// DeviceGuard omitted
return at::native::nanmedian_out(self, dim, keepdim, values, indices);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_names_dim_min(const at::Tensor & self, at::Dimname dim, bool keepdim) {
// No device check
// DeviceGuard omitted
return at::native::min(self, dim, keepdim);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_names_dim_min_min_out(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices) {
// No device check
// DeviceGuard omitted
return at::native::min_out(self, dim, keepdim, min, min_indices);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___sparse_mm(const at::Tensor & sparse, const at::Tensor & dense) {
// No device check
// DeviceGuard omitted
return at::native::_sparse_mm(sparse, dense);
}
} // anonymous namespace
// NOTE(review): torchgen-generated CompositeImplicitAutograd wrappers; each
// forwards its arguments unchanged to the matching at::native kernel. The
// "No device check" / "DeviceGuard omitted" comments are emitted by the
// generator for this dispatch key. Regenerate via torchgen; do not hand-edit.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_reduce__sparse_mm(const at::Tensor & sparse, const at::Tensor & dense, c10::string_view reduce) {
// No device check
// DeviceGuard omitted
return at::native::_sparse_mm(sparse, dense, reduce);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_dimname_mode(const at::Tensor & self, at::Dimname dim, bool keepdim) {
// No device check
// DeviceGuard omitted
return at::native::mode(self, dim, keepdim);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_dimname_out_mode_out(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
// No device check
// DeviceGuard omitted
return at::native::mode_out(self, dim, keepdim, values, indices);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_multiply(const at::Tensor & self, const at::Tensor & other) {
// No device check
// DeviceGuard omitted
return at::native::multiply(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_multiply_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::multiply_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_multiply_(at::Tensor & self, const at::Tensor & other) {
// No device check
// DeviceGuard omitted
return at::native::multiply_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Scalar_multiply(const at::Tensor & self, const at::Scalar & other) {
// No device check
// DeviceGuard omitted
return at::native::multiply(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_multiply_(at::Tensor & self, const at::Scalar & other) {
// No device check
// DeviceGuard omitted
return at::native::multiply_(self, other);
}
} // anonymous namespace
namespace {
// Dispatches to the SymInt-aware native entry point (narrow_symint), which
// accepts symbolic start/length values.
at::Tensor wrapper_CompositeImplicitAutograd__narrow(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
// No device check
// DeviceGuard omitted
return at::native::narrow_symint(self, dim, start, length);
}
} // anonymous namespace
namespace {
// Tensor-indexed variant: start is a 0-dim tensor, routed to narrow_tensor_symint.
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_narrow(const at::Tensor & self, int64_t dim, const at::Tensor & start, c10::SymInt length) {
// No device check
// DeviceGuard omitted
return at::native::narrow_tensor_symint(self, dim, start, length);
}
} // anonymous namespace
namespace {
bool wrapper_CompositeImplicitAutograd__is_vulkan_available() {
// No device check
// DeviceGuard omitted
return at::native::is_vulkan_available();
}
} // anonymous namespace
// NOTE(review): torchgen-generated CompositeImplicitAutograd wrappers; each
// forwards its arguments unchanged to the matching at::native kernel. The
// "No device check" / "DeviceGuard omitted" comments are emitted by the
// generator for this dispatch key. Regenerate via torchgen; do not hand-edit.
namespace {
bool wrapper_CompositeImplicitAutograd___nnpack_available() {
// No device check
// DeviceGuard omitted
return at::native::_nnpack_available();
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__pairwise_distance(const at::Tensor & x1, const at::Tensor & x2, double p, double eps, bool keepdim) {
// No device check
// DeviceGuard omitted
return at::native::pairwise_distance(x1, x2, p, eps, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__cdist(const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode) {
// No device check
// DeviceGuard omitted
return at::native::cdist(x1, x2, p, compute_mode);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__pdist(const at::Tensor & self, double p) {
// No device check
// DeviceGuard omitted
return at::native::pdist(self, p);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__cosine_similarity(const at::Tensor & x1, const at::Tensor & x2, int64_t dim, double eps) {
// No device check
// DeviceGuard omitted
return at::native::cosine_similarity(x1, x2, dim, eps);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_intlist_movedim(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) {
// No device check
// DeviceGuard omitted
return at::native::movedim(self, source, destination);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_int_movedim(const at::Tensor & self, int64_t source, int64_t destination) {
// No device check
// DeviceGuard omitted
return at::native::movedim(self, source, destination);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_intlist_moveaxis(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) {
// No device check
// DeviceGuard omitted
return at::native::moveaxis(self, source, destination);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_int_moveaxis(const at::Tensor & self, int64_t source, int64_t destination) {
// No device check
// DeviceGuard omitted
return at::native::moveaxis(self, source, destination);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__numpy_T(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::numpy_T(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__matrix_H(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::matrix_H(self);
}
} // anonymous namespace
// NOTE(review): torchgen-generated CompositeImplicitAutograd wrappers; each
// forwards its arguments unchanged to the matching at::native kernel. The
// "No device check" / "DeviceGuard omitted" comments are emitted by the
// generator for this dispatch key. Regenerate via torchgen; do not hand-edit.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__mT(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::mT(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__mH(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::mH(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__adjoint(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::adjoint(self);
}
} // anonymous namespace
namespace {
// Note: kernel name differs from the op -- the math_ prefix presumably marks
// the composite/decomposed fallback implementation; confirm against
// native_functions.yaml before relying on this.
at::Tensor wrapper_CompositeImplicitAutograd__native_channel_shuffle(const at::Tensor & self, int64_t groups) {
// No device check
// DeviceGuard omitted
return at::native::math_channel_shuffle(self, groups);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__pin_memory(const at::Tensor & self, c10::optional<at::Device> device) {
// No device check
// DeviceGuard omitted
return at::native::pin_memory(self, device);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__pinverse(const at::Tensor & self, double rcond) {
// No device check
// DeviceGuard omitted
return at::native::pinverse(self, rcond);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__poisson_nll_loss(const at::Tensor & input, const at::Tensor & target, bool log_input, bool full, double eps, int64_t reduction) {
// No device check
// DeviceGuard omitted
return at::native::poisson_nll_loss(input, target, log_input, full, eps, reduction);
}
} // anonymous namespace
namespace {
// C10_AS_INTARRAYREF_SLOW converts the SymInt sizes to a concrete
// IntArrayRef (the native kernel is not SymInt-aware here).
at::Tensor & wrapper_CompositeImplicitAutograd_generator_out_rand_out(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::rand_out(C10_AS_INTARRAYREF_SLOW(size), generator, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_randn_out(c10::SymIntArrayRef size, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::randn_out(C10_AS_INTARRAYREF_SLOW(size), out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_generator_out_randn_out(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::randn_out(C10_AS_INTARRAYREF_SLOW(size), generator, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__ravel(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::ravel(self);
}
} // anonymous namespace
// NOTE(review): torchgen-generated CompositeImplicitAutograd wrappers; each
// forwards its arguments unchanged to the matching at::native kernel. The
// "No device check" / "DeviceGuard omitted" comments are emitted by the
// generator for this dispatch key. Regenerate via torchgen; do not hand-edit.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__negative(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::negative(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_negative_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::negative_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__negative_(at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::negative_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_self_Tensor_repeat_interleave(const at::Tensor & self, const at::Tensor & repeats, c10::optional<int64_t> dim, c10::optional<int64_t> output_size) {
// No device check
// DeviceGuard omitted
return at::native::repeat_interleave(self, repeats, dim, output_size);
}
} // anonymous namespace
namespace {
// Dispatches to the SymInt-aware native entry point (repeat_interleave_symint).
at::Tensor wrapper_CompositeImplicitAutograd_self_int_repeat_interleave(const at::Tensor & self, c10::SymInt repeats, c10::optional<int64_t> dim, c10::optional<int64_t> output_size) {
// No device check
// DeviceGuard omitted
return at::native::repeat_interleave_symint(self, repeats, dim, output_size);
}
} // anonymous namespace
namespace {
// Dispatches to the SymInt-aware native entry point (reshape_symint).
at::Tensor wrapper_CompositeImplicitAutograd__reshape(const at::Tensor & self, c10::SymIntArrayRef shape) {
// No device check
// DeviceGuard omitted
return at::native::reshape_symint(self, shape);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__reshape_as(const at::Tensor & self, const at::Tensor & other) {
// No device check
// DeviceGuard omitted
return at::native::reshape_as(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__rrelu(const at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {
// No device check
// DeviceGuard omitted
return at::native::rrelu(self, lower, upper, training, generator);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__rrelu_(at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {
// No device check
// DeviceGuard omitted
return at::native::rrelu_(self, lower, upper, training, generator);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__relu6(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::relu6(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__relu6_(at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::relu6_(self);
}
} // anonymous namespace
// NOTE(review): torchgen-generated CompositeImplicitAutograd wrappers; each
// forwards its arguments unchanged to the matching at::native kernel. The
// "No device check" / "DeviceGuard omitted" comments are emitted by the
// generator for this dispatch key. Regenerate via torchgen; do not hand-edit.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__prelu(const at::Tensor & self, const at::Tensor & weight) {
// No device check
// DeviceGuard omitted
return at::native::prelu(self, weight);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__infinitely_differentiable_gelu_backward(const at::Tensor & grad, const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::infinitely_differentiable_gelu_backward(grad, self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Dimname_select(const at::Tensor & self, at::Dimname dim, int64_t index) {
// No device check
// DeviceGuard omitted
return at::native::select(self, dim, index);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__selu(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::selu(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__selu_(at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::selu_(self);
}
} // anonymous namespace
namespace {
// Note: kernel name differs from the op -- the math_ prefix presumably marks
// the composite/decomposed fallback implementation; confirm against
// native_functions.yaml before relying on this.
at::Tensor wrapper_CompositeImplicitAutograd__silu_backward(const at::Tensor & grad_output, const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::math_silu_backward(grad_output, self);
}
} // anonymous namespace
namespace {
// Same math_ naming convention as math_silu_backward above (composite
// fallback kernel name differs from the op name).
at::Tensor wrapper_CompositeImplicitAutograd__mish_backward(const at::Tensor & grad_output, const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::math_mish_backward(grad_output, self);
}
} // anonymous namespace
namespace {
int64_t wrapper_CompositeImplicitAutograd_int_size(const at::Tensor & self, int64_t dim) {
// No device check
// DeviceGuard omitted
return at::native::size(self, dim);
}
} // anonymous namespace
namespace {
int64_t wrapper_CompositeImplicitAutograd_Dimname_size(const at::Tensor & self, at::Dimname dim) {
// No device check
// DeviceGuard omitted
return at::native::size(self, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__smm(const at::Tensor & self, const at::Tensor & mat2) {
// No device check
// DeviceGuard omitted
return at::native::smm(self, mat2);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_int_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
// No device check
// DeviceGuard omitted
return at::native::softmax(self, dim, dtype);
}
} // anonymous namespace
// NOTE(review): torchgen-generated CompositeImplicitAutograd wrappers; each
// forwards its arguments unchanged to the matching at::native kernel. The
// "No device check" / "DeviceGuard omitted" comments are emitted by the
// generator for this dispatch key. Regenerate via torchgen; do not hand-edit.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Dimname_softmax(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
// No device check
// DeviceGuard omitted
return at::native::softmax(self, dim, dtype);
}
} // anonymous namespace
namespace {
// Dispatches to the SymInt-aware native entry point (split_symint).
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_sizes_split(const at::Tensor & self, c10::SymIntArrayRef split_size, int64_t dim) {
// No device check
// DeviceGuard omitted
return at::native::split_symint(self, split_size, dim);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_int_hsplit(const at::Tensor & self, int64_t sections) {
// No device check
// DeviceGuard omitted
return at::native::hsplit(self, sections);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_array_hsplit(const at::Tensor & self, at::IntArrayRef indices) {
// No device check
// DeviceGuard omitted
return at::native::hsplit(self, indices);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_int_vsplit(const at::Tensor & self, int64_t sections) {
// No device check
// DeviceGuard omitted
return at::native::vsplit(self, sections);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_array_vsplit(const at::Tensor & self, at::IntArrayRef indices) {
// No device check
// DeviceGuard omitted
return at::native::vsplit(self, indices);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_int_dsplit(const at::Tensor & self, int64_t sections) {
// No device check
// DeviceGuard omitted
return at::native::dsplit(self, sections);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_array_dsplit(const at::Tensor & self, at::IntArrayRef indices) {
// No device check
// DeviceGuard omitted
return at::native::dsplit(self, indices);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dimname_squeeze(const at::Tensor & self, at::Dimname dim) {
// No device check
// DeviceGuard omitted
return at::native::squeeze(self, dim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_dimname_squeeze_(at::Tensor & self, at::Dimname dim) {
// No device check
// DeviceGuard omitted
return at::native::squeeze_(self, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__sspaddmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
// No device check
// DeviceGuard omitted
return at::native::sspaddmm(self, mat1, mat2, beta, alpha);
}
} // anonymous namespace
// NOTE(review): torchgen-generated CompositeImplicitAutograd wrappers; each
// forwards its arguments unchanged to the matching at::native kernel. The
// "No device check" / "DeviceGuard omitted" comments are emitted by the
// generator for this dispatch key. Regenerate via torchgen; do not hand-edit.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__hstack(at::TensorList tensors) {
// No device check
// DeviceGuard omitted
return at::native::hstack(tensors);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_hstack_out(at::TensorList tensors, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::hstack_out(tensors, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__vstack(at::TensorList tensors) {
// No device check
// DeviceGuard omitted
return at::native::vstack(tensors);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_vstack_out(at::TensorList tensors, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::vstack_out(tensors, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__dstack(at::TensorList tensors) {
// No device check
// DeviceGuard omitted
return at::native::dstack(tensors);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_dstack_out(at::TensorList tensors, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::dstack_out(tensors, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__stft(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool normalized, c10::optional<bool> onesided, c10::optional<bool> return_complex) {
// No device check
// DeviceGuard omitted
return at::native::stft(self, n_fft, hop_length, win_length, window, normalized, onesided, return_complex);
}
} // anonymous namespace
namespace {
// stft.center overload: adds center/pad_mode, resolved to the matching
// at::native::stft overload by argument arity.
at::Tensor wrapper_CompositeImplicitAutograd_center_stft(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool center, c10::string_view pad_mode, bool normalized, c10::optional<bool> onesided, c10::optional<bool> return_complex) {
// No device check
// DeviceGuard omitted
return at::native::stft(self, n_fft, hop_length, win_length, window, center, pad_mode, normalized, onesided, return_complex);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__istft(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool center, bool normalized, c10::optional<bool> onesided, c10::optional<int64_t> length, bool return_complex) {
// No device check
// DeviceGuard omitted
return at::native::istft(self, n_fft, hop_length, win_length, window, center, normalized, onesided, length, return_complex);
}
} // anonymous namespace
namespace {
int64_t wrapper_CompositeImplicitAutograd_int_stride(const at::Tensor & self, int64_t dim) {
// No device check
// DeviceGuard omitted
return at::native::stride(self, dim);
}
} // anonymous namespace
namespace {
int64_t wrapper_CompositeImplicitAutograd_Dimname_stride(const at::Tensor & self, at::Dimname dim) {
// No device check
// DeviceGuard omitted
return at::native::stride(self, dim);
}
} // anonymous namespace
// NOTE(review): torchgen-generated CompositeImplicitAutograd wrappers; each
// forwards its arguments unchanged to the matching at::native kernel. The
// "No device check" / "DeviceGuard omitted" comments are emitted by the
// generator for this dispatch key. Regenerate via torchgen; do not hand-edit.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dim_DimnameList_sum(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
// No device check
// DeviceGuard omitted
return at::native::sum(self, dim, keepdim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_DimnameList_out_sum_out(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::sum_out(self, dim, keepdim, dtype, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__sum_to_size(const at::Tensor & self, at::IntArrayRef size) {
// No device check
// DeviceGuard omitted
return at::native::sum_to_size(self, size);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__square(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::square(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_square_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::square_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__square_(at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::square_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__std(const at::Tensor & self, bool unbiased) {
// No device check
// DeviceGuard omitted
return at::native::std(self, unbiased);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dim_std(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
// No device check
// DeviceGuard omitted
return at::native::std(self, dim, unbiased, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_std_out(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::std_out(self, dim, unbiased, keepdim, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__std_mean(const at::Tensor & self, bool unbiased) {
// No device check
// DeviceGuard omitted
return at::native::std_mean(self, unbiased);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_dim_std_mean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
// No device check
// DeviceGuard omitted
return at::native::std_mean(self, dim, unbiased, keepdim);
}
} // anonymous namespace
// NOTE(review): torchgen-generated CompositeImplicitAutograd wrappers; each
// forwards its arguments unchanged to the matching at::native kernel. The
// "No device check" / "DeviceGuard omitted" comments are emitted by the
// generator for this dispatch key. Regenerate via torchgen; do not hand-edit.
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_names_dim_std_mean(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
// No device check
// DeviceGuard omitted
return at::native::std_mean(self, dim, unbiased, keepdim);
}
} // anonymous namespace
namespace {
// correction overload: c10::optional<int64_t> correction replaces the
// bool unbiased flag of the overload above.
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_correction_names_std_mean(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
// No device check
// DeviceGuard omitted
return at::native::std_mean(self, dim, correction, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_names_dim_std(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
// No device check
// DeviceGuard omitted
return at::native::std(self, dim, unbiased, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_names_out_std_out(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::std_out(self, dim, unbiased, keepdim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_correction_names_std(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
// No device check
// DeviceGuard omitted
return at::native::std(self, dim, correction, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_correction_names_out_std_out(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::std_out(self, dim, correction, keepdim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dim_Dimname_prod(const at::Tensor & self, at::Dimname dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
// No device check
// DeviceGuard omitted
return at::native::prod(self, dim, keepdim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Dimname_out_prod_out(const at::Tensor & self, at::Dimname dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::prod_out(self, dim, keepdim, dtype, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__tensordot(const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other) {
// No device check
// DeviceGuard omitted
return at::native::tensordot(self, other, dims_self, dims_other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__tile(const at::Tensor & self, at::IntArrayRef dims) {
// No device check
// DeviceGuard omitted
return at::native::tile(self, dims);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Dimname_transpose(const at::Tensor & self, at::Dimname dim0, at::Dimname dim1) {
// No device check
// DeviceGuard omitted
return at::native::transpose(self, dim0, dim1);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__one_hot(const at::Tensor & self, int64_t num_classes) {
// No device check
// DeviceGuard omitted
return at::native::one_hot(self, num_classes);
}
} // anonymous namespace
3647 | namespace { |
3648 | at::Tensor wrapper_CompositeImplicitAutograd__fliplr(const at::Tensor & self) { |
3649 | // No device check |
3650 | // DeviceGuard omitted |
3651 | return at::native::fliplr(self); |
3652 | } |
3653 | } // anonymous namespace |
3654 | namespace { |
3655 | at::Tensor wrapper_CompositeImplicitAutograd__flipud(const at::Tensor & self) { |
3656 | // No device check |
3657 | // DeviceGuard omitted |
3658 | return at::native::flipud(self); |
3659 | } |
3660 | } // anonymous namespace |
// @generated wrappers for `trapezoid` / `trapz` (tensor-x and scalar-dx
// overloads), `triplet_margin_loss`, the `fix` family (functional, out,
// in-place), `type_as`, and `_has_compatible_shallow_copy_type`. Each
// forwards unchanged to at::native::; no device check / DeviceGuard emitted.
// Do not edit by hand -- generated by torchgen (see file header).
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_x_trapezoid(const at::Tensor & y, const at::Tensor & x, int64_t dim) {
// No device check
// DeviceGuard omitted
return at::native::trapezoid(y, x, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dx_trapezoid(const at::Tensor & y, const at::Scalar & dx, int64_t dim) {
// No device check
// DeviceGuard omitted
return at::native::trapezoid(y, dx, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_x_trapz(const at::Tensor & y, const at::Tensor & x, int64_t dim) {
// No device check
// DeviceGuard omitted
return at::native::trapz(y, x, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dx_trapz(const at::Tensor & y, double dx, int64_t dim) {
// No device check
// DeviceGuard omitted
return at::native::trapz(y, dx, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__triplet_margin_loss(const at::Tensor & anchor, const at::Tensor & positive, const at::Tensor & negative, double margin, double p, double eps, bool swap, int64_t reduction) {
// No device check
// DeviceGuard omitted
return at::native::triplet_margin_loss(anchor, positive, negative, margin, p, eps, swap, reduction);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fix(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::fix(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_fix_out(const at::Tensor & self, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::fix_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__fix_(at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::fix_(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__type_as(const at::Tensor & self, const at::Tensor & other) {
// No device check
// DeviceGuard omitted
return at::native::type_as(self, other);
}
} // anonymous namespace
namespace {
bool wrapper_CompositeImplicitAutograd___has_compatible_shallow_copy_type(const at::Tensor & self, const at::Tensor & from) {
// No device check
// DeviceGuard omitted
return at::native::_has_compatible_shallow_copy_type(self, from);
}
} // anonymous namespace
// @generated wrappers for `vander` and the full `var` / `var_mean` overload
// families (unbiased flag, optional int dims, named dims, and `correction`
// variants, plus `out` forms). Each forwards unchanged to at::native::;
// no device check / DeviceGuard is emitted for these entries.
// Do not edit by hand -- generated by torchgen (see file header).
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__vander(const at::Tensor & x, c10::optional<int64_t> N, bool increasing) {
// No device check
// DeviceGuard omitted
return at::native::vander(x, N, increasing);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__var(const at::Tensor & self, bool unbiased) {
// No device check
// DeviceGuard omitted
return at::native::var(self, unbiased);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dim_var(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
// No device check
// DeviceGuard omitted
return at::native::var(self, dim, unbiased, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_var_out(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::var_out(self, dim, unbiased, keepdim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_names_dim_var(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
// No device check
// DeviceGuard omitted
return at::native::var(self, dim, unbiased, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_names_out_var_out(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::var_out(self, dim, unbiased, keepdim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_correction_names_var(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
// No device check
// DeviceGuard omitted
return at::native::var(self, dim, correction, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_correction_names_out_var_out(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::var_out(self, dim, correction, keepdim, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__var_mean(const at::Tensor & self, bool unbiased) {
// No device check
// DeviceGuard omitted
return at::native::var_mean(self, unbiased);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_dim_var_mean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
// No device check
// DeviceGuard omitted
return at::native::var_mean(self, dim, unbiased, keepdim);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_names_dim_var_mean(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
// No device check
// DeviceGuard omitted
return at::native::var_mean(self, dim, unbiased, keepdim);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_correction_names_var_mean(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
// No device check
// DeviceGuard omitted
return at::native::var_mean(self, dim, correction, keepdim);
}
} // anonymous namespace
// @generated wrappers for `view_as`, the scalar `where` overloads (plus the
// tensor-list `where(condition)` form), `norm_except_dim`, `_weight_norm`,
// and `_weight_norm_differentiable_backward`. Each forwards unchanged to
// at::native::; no device check / DeviceGuard is emitted.
// Do not edit by hand -- generated by torchgen (see file header).
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__view_as(const at::Tensor & self, const at::Tensor & other) {
// No device check
// DeviceGuard omitted
return at::native::view_as(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_ScalarSelf_where(const at::Tensor & condition, const at::Scalar & self, const at::Tensor & other) {
// No device check
// DeviceGuard omitted
return at::native::where(condition, self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_ScalarOther_where(const at::Tensor & condition, const at::Tensor & self, const at::Scalar & other) {
// No device check
// DeviceGuard omitted
return at::native::where(condition, self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Scalar_where(const at::Tensor & condition, const at::Scalar & self, const at::Scalar & other) {
// No device check
// DeviceGuard omitted
return at::native::where(condition, self, other);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd__where(const at::Tensor & condition) {
// No device check
// DeviceGuard omitted
return at::native::where(condition);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__norm_except_dim(const at::Tensor & v, int64_t pow, int64_t dim) {
// No device check
// DeviceGuard omitted
return at::native::norm_except_dim(v, pow, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___weight_norm(const at::Tensor & v, const at::Tensor & g, int64_t dim) {
// No device check
// DeviceGuard omitted
return at::native::_weight_norm(v, g, dim);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd___weight_norm_differentiable_backward(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
// No device check
// DeviceGuard omitted
return at::native::_weight_norm_differentiable_backward(grad_w, saved_v, saved_g, saved_norms, dim);
}
} // anonymous namespace
// @generated wrappers for `_sparse_sum` (plain, dtype, dim+dtype overloads),
// `_sparse_softmax`, and `_sparse_log_softmax` (int and Dimname overloads).
// Each forwards unchanged to at::native::; no device check / DeviceGuard
// is emitted. Do not edit by hand -- generated by torchgen (see file header).
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___sparse_sum(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::_sparse_sum(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dtype__sparse_sum(const at::Tensor & self, at::ScalarType dtype) {
// No device check
// DeviceGuard omitted
return at::native::_sparse_sum(self, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dim_dtype__sparse_sum(const at::Tensor & self, at::IntArrayRef dim, at::ScalarType dtype) {
// No device check
// DeviceGuard omitted
return at::native::_sparse_sum(self, dim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_int__sparse_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
// No device check
// DeviceGuard omitted
return at::native::_sparse_softmax(self, dim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Dimname__sparse_softmax(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
// No device check
// DeviceGuard omitted
return at::native::_sparse_softmax(self, dim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_int__sparse_log_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
// No device check
// DeviceGuard omitted
return at::native::_sparse_log_softmax(self, dim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Dimname__sparse_log_softmax(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
// No device check
// DeviceGuard omitted
return at::native::_sparse_log_softmax(self, dim, dtype);
}
} // anonymous namespace
// @generated wrappers for the named-dim `norm` overloads (with/without dtype,
// plus `out` forms), `frobenius_norm`, `nuclear_norm` (with/without dim, plus
// `out` forms), and `positive`. Each forwards unchanged to at::native::;
// no device check / DeviceGuard is emitted.
// Do not edit by hand -- generated by torchgen (see file header).
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_names_ScalarOpt_dim_dtype_norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype) {
// No device check
// DeviceGuard omitted
return at::native::norm(self, p, dim, keepdim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_names_dtype_out_norm_out(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::norm_out(self, p, dim, keepdim, dtype, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_names_ScalarOpt_dim_norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim) {
// No device check
// DeviceGuard omitted
return at::native::norm(self, p, dim, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_names_out_norm_out(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::norm_out(self, p, dim, keepdim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dim_frobenius_norm(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
// No device check
// DeviceGuard omitted
return at::native::frobenius_norm(self, dim, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_frobenius_norm_out(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::frobenius_norm_out(self, dim, keepdim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__nuclear_norm(const at::Tensor & self, bool keepdim) {
// No device check
// DeviceGuard omitted
return at::native::nuclear_norm(self, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_nuclear_norm_out(const at::Tensor & self, bool keepdim, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::nuclear_norm_out(self, keepdim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dim_nuclear_norm(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
// No device check
// DeviceGuard omitted
return at::native::nuclear_norm(self, dim, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_dim_out_nuclear_norm_out(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::nuclear_norm_out(self, dim, keepdim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__positive(const at::Tensor & self) {
// No device check
// DeviceGuard omitted
return at::native::positive(self);
}
} // anonymous namespace
// @generated wrappers for the `subtract` family (Tensor/Scalar overloads,
// `out` form, and in-place `subtract_`). Each forwards unchanged to
// at::native::; no device check / DeviceGuard is emitted.
// Do not edit by hand -- generated by torchgen (see file header).
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_subtract(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
// No device check
// DeviceGuard omitted
return at::native::subtract(self, other, alpha);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_subtract_out(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
// No device check
// DeviceGuard omitted
return at::native::subtract_out(self, other, alpha, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_subtract_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
// No device check
// DeviceGuard omitted
return at::native::subtract_(self, other, alpha);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Scalar_subtract(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
// No device check
// DeviceGuard omitted
return at::native::subtract(self, other, alpha);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_subtract_(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
// No device check
// DeviceGuard omitted
return at::native::subtract_(self, other, alpha);
}
} // anonymous namespace
// @generated wrappers for the sparse compressed-format tensor factories:
// generic `sparse_compressed_tensor` plus the CSR/CSC/BSR/BSC variants,
// each in a with-size and without-size overload, followed by the
// `_*_tensor_unsafe` variants (which skip invariant validation in the
// native implementation -- see the `_validate_*` wrappers later in this
// file). Each forwards unchanged to at::native::; no device check /
// DeviceGuard is emitted. Do not edit by hand -- generated by torchgen.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_comp_plain_value_size_sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::sparse_compressed_tensor(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_crow_col_value_size_sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::sparse_csr_tensor(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_ccol_row_value_size_sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::sparse_csc_tensor(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_crow_col_value_size_sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::sparse_bsr_tensor(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_ccol_row_value_size_sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::sparse_bsc_tensor(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_comp_plain_value_sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::sparse_compressed_tensor(compressed_indices, plain_indices, values, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_crow_col_value_sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::sparse_csr_tensor(crow_indices, col_indices, values, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_ccol_row_value_sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::sparse_csc_tensor(ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_crow_col_value_sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::sparse_bsr_tensor(crow_indices, col_indices, values, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_ccol_row_value_sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::sparse_bsc_tensor(ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::_sparse_compressed_tensor_unsafe(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___sparse_csr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::_sparse_csr_tensor_unsafe(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___sparse_csc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::_sparse_csc_tensor_unsafe(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___sparse_bsr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::_sparse_bsr_tensor_unsafe(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___sparse_bsc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::_sparse_bsc_tensor_unsafe(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
}
} // anonymous namespace
// @generated wrappers for the sparse COO tensor factories and the COO
// argument validator. Note the `_unsafe` variant takes c10::SymIntArrayRef
// and forwards to the `_symint` native entry point, unlike its siblings,
// which take at::IntArrayRef. Each forwards unchanged to at::native::;
// no device check / DeviceGuard is emitted. Do not edit by hand --
// generated by torchgen (see file header).
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_indices_sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::sparse_coo_tensor(indices, values, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_indices_size_sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::sparse_coo_tensor(indices, values, size, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
// No device check
// DeviceGuard omitted
return at::native::_sparse_coo_tensor_unsafe_symint(indices, values, size, dtype, layout, device, pin_memory);
}
} // anonymous namespace
namespace {
void wrapper_CompositeImplicitAutograd___validate_sparse_coo_tensor_args(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size) {
// No device check
// DeviceGuard omitted
return at::native::_validate_sparse_coo_tensor_args(indices, values, size);
}
} // anonymous namespace
4165 | namespace { |
4166 | void wrapper_CompositeImplicitAutograd___validate_sparse_compressed_tensor_args(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::Layout layout) { |
4167 | // No device check |
4168 | // DeviceGuard omitted |
4169 | return at::native::_validate_sparse_compressed_tensor_args(compressed_indices, plain_indices, values, size, layout); |
4170 | } |
4171 | } // anonymous namespace |
4172 | namespace { |
4173 | void wrapper_CompositeImplicitAutograd___validate_sparse_csr_tensor_args(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) { |
4174 | // No device check |
4175 | // DeviceGuard omitted |
4176 | return at::native::_validate_sparse_csr_tensor_args(crow_indices, col_indices, values, size); |
4177 | } |
4178 | } // anonymous namespace |
4179 | namespace { |
4180 | void wrapper_CompositeImplicitAutograd___validate_sparse_csc_tensor_args(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) { |
4181 | // No device check |
4182 | // DeviceGuard omitted |
4183 | return at::native::_validate_sparse_csc_tensor_args(ccol_indices, row_indices, values, size); |
4184 | } |
4185 | } // anonymous namespace |
4186 | namespace { |
4187 | void wrapper_CompositeImplicitAutograd___validate_sparse_bsr_tensor_args(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) { |
4188 | // No device check |
4189 | // DeviceGuard omitted |
4190 | return at::native::_validate_sparse_bsr_tensor_args(crow_indices, col_indices, values, size); |
4191 | } |
4192 | } // anonymous namespace |
4193 | namespace { |
4194 | void wrapper_CompositeImplicitAutograd___validate_sparse_bsc_tensor_args(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) { |
4195 | // No device check |
4196 | // DeviceGuard omitted |
4197 | return at::native::_validate_sparse_bsc_tensor_args(ccol_indices, row_indices, values, size); |
4198 | } |
4199 | } // anonymous namespace |
// ---------------------------------------------------------------------------
// Layout/device conversion helpers (_to_cpu, to_dense and its backward,
// coalesce, to_mkldnn_backward) plus the Dimname overload of unbind.
// Each wrapper is a pure forward to the same-named at::native kernel.
// ---------------------------------------------------------------------------
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd___to_cpu(at::TensorList tensors) {
    // No device check
  // DeviceGuard omitted
  return at::native::_to_cpu(tensors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__to_dense(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
    // No device check
  // DeviceGuard omitted
  return at::native::to_dense(self, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__to_dense_backward(const at::Tensor & grad, const at::Tensor & input) {
    // No device check
  // DeviceGuard omitted
  return at::native::to_dense_backward(grad, input);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__coalesce(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::coalesce(self);
}
} // anonymous namespace
namespace {
// Dimname overload of unbind (dimension addressed by name, not index).
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_Dimname_unbind(const at::Tensor & self, at::Dimname dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::unbind(self, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__to_mkldnn_backward(const at::Tensor & grad, const at::Tensor & input) {
    // No device check
  // DeviceGuard omitted
  return at::native::to_mkldnn_backward(grad, input);
}
} // anonymous namespace
// ---------------------------------------------------------------------------
// Fake-quantization and quantization-parameter wrappers. The two
// fake_quantize_per_tensor_affine overloads differ only in how scale /
// zero_point are supplied (scalars vs. tensors, see the `tensor_qparams`
// overload suffix); both resolve to at::native::fake_quantize_per_tensor_affine.
// ---------------------------------------------------------------------------
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fake_quantize_per_tensor_affine(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
    // No device check
  // DeviceGuard omitted
  return at::native::fake_quantize_per_tensor_affine(self, scale, zero_point, quant_min, quant_max);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_tensor_qparams_fake_quantize_per_tensor_affine(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max) {
    // No device check
  // DeviceGuard omitted
  return at::native::fake_quantize_per_tensor_affine(self, scale, zero_point, quant_min, quant_max);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fake_quantize_per_tensor_affine_cachemask_backward(const at::Tensor & grad, const at::Tensor & mask) {
    // No device check
  // DeviceGuard omitted
  return at::native::fake_quantize_per_tensor_affine_cachemask_backward(grad, mask);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fake_quantize_per_channel_affine(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
    // No device check
  // DeviceGuard omitted
  return at::native::fake_quantize_per_channel_affine(self, scale, zero_point, axis, quant_min, quant_max);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fake_quantize_per_channel_affine_cachemask_backward(const at::Tensor & grad, const at::Tensor & mask) {
    // No device check
  // DeviceGuard omitted
  return at::native::fake_quantize_per_channel_affine_cachemask_backward(grad, mask);
}
} // anonymous namespace
namespace {
// NOTE: running_min/running_max/scale/zero_point are taken by mutable
// reference and handed through unchanged to the native kernel.
at::Tensor wrapper_CompositeImplicitAutograd__fused_moving_avg_obs_fake_quant(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) {
    // No device check
  // DeviceGuard omitted
  return at::native::fused_moving_avg_obs_fake_quant(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
}
} // anonymous namespace
namespace {
::std::tuple<double,int64_t> wrapper_CompositeImplicitAutograd___choose_qparams_per_tensor(const at::Tensor & self, bool reduce_range) {
    // No device check
  // DeviceGuard omitted
  return at::native::_choose_qparams_per_tensor(self, reduce_range);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___saturate_weight_to_fp16(const at::Tensor & weight) {
    // No device check
  // DeviceGuard omitted
  return at::native::_saturate_weight_to_fp16(weight);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__choose_qparams_optimized(const at::Tensor & input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width) {
    // No device check
  // DeviceGuard omitted
  return at::native::choose_qparams_optimized(input, numel, n_bins, ratio, bit_width);
}
} // anonymous namespace
// ---------------------------------------------------------------------------
// Autocast precision converters and the four overloads of Tensor::to
// (dtype_layout / device / dtype / other). Each overload name encodes which
// aten::to.* schema it registers; all forward to at::native::to.
// ---------------------------------------------------------------------------
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___autocast_to_reduced_precision(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled, at::ScalarType cuda_dtype, at::ScalarType cpu_dtype) {
    // No device check
  // DeviceGuard omitted
  return at::native::_autocast_to_reduced_precision(self, cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___autocast_to_full_precision(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled) {
    // No device check
  // DeviceGuard omitted
  return at::native::_autocast_to_full_precision(self, cuda_enabled, cpu_enabled);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dtype_layout_to(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
    // No device check
  // DeviceGuard omitted
  return at::native::to(self, dtype, layout, device, pin_memory, non_blocking, copy, memory_format);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_device_to(const at::Tensor & self, at::Device device, at::ScalarType dtype, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
    // No device check
  // DeviceGuard omitted
  return at::native::to(self, device, dtype, non_blocking, copy, memory_format);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dtype_to(const at::Tensor & self, at::ScalarType dtype, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
    // No device check
  // DeviceGuard omitted
  return at::native::to(self, dtype, non_blocking, copy, memory_format);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_other_to(const at::Tensor & self, const at::Tensor & other, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
    // No device check
  // DeviceGuard omitted
  return at::native::to(self, other, non_blocking, copy, memory_format);
}
} // anonymous namespace
// ---------------------------------------------------------------------------
// Combinatoric / type-promotion utilities: meshgrid (with and without an
// explicit `indexing` mode), cartesian_prod, combinations, item, the four
// result_type overloads (Tensor/Scalar argument combinations), can_cast and
// promote_types. All forward unchanged to at::native.
// ---------------------------------------------------------------------------
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd__meshgrid(at::TensorList tensors) {
    // No device check
  // DeviceGuard omitted
  return at::native::meshgrid(tensors);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd_indexing_meshgrid(at::TensorList tensors, c10::string_view indexing) {
    // No device check
  // DeviceGuard omitted
  return at::native::meshgrid(tensors, indexing);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__cartesian_prod(at::TensorList tensors) {
    // No device check
  // DeviceGuard omitted
  return at::native::cartesian_prod(tensors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__combinations(const at::Tensor & self, int64_t r, bool with_replacement) {
    // No device check
  // DeviceGuard omitted
  return at::native::combinations(self, r, with_replacement);
}
} // anonymous namespace
namespace {
at::Scalar wrapper_CompositeImplicitAutograd__item(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::item(self);
}
} // anonymous namespace
namespace {
at::ScalarType wrapper_CompositeImplicitAutograd_Tensor_result_type(const at::Tensor & tensor, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::result_type(tensor, other);
}
} // anonymous namespace
namespace {
at::ScalarType wrapper_CompositeImplicitAutograd_Scalar_result_type(const at::Tensor & tensor, const at::Scalar & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::result_type(tensor, other);
}
} // anonymous namespace
namespace {
at::ScalarType wrapper_CompositeImplicitAutograd_Scalar_Tensor_result_type(const at::Scalar & scalar, const at::Tensor & tensor) {
    // No device check
  // DeviceGuard omitted
  return at::native::result_type(scalar, tensor);
}
} // anonymous namespace
namespace {
at::ScalarType wrapper_CompositeImplicitAutograd_Scalar_Scalar_result_type(const at::Scalar & scalar1, const at::Scalar & scalar2) {
    // No device check
  // DeviceGuard omitted
  return at::native::result_type(scalar1, scalar2);
}
} // anonymous namespace
namespace {
bool wrapper_CompositeImplicitAutograd__can_cast(at::ScalarType from, at::ScalarType to) {
    // No device check
  // DeviceGuard omitted
  return at::native::can_cast(from, to);
}
} // anonymous namespace
namespace {
at::ScalarType wrapper_CompositeImplicitAutograd__promote_types(at::ScalarType type1, at::ScalarType type2) {
    // No device check
  // DeviceGuard omitted
  return at::native::promote_types(type1, type2);
}
} // anonymous namespace
// ---------------------------------------------------------------------------
// Recurrent network wrappers: fused/differentiable LSTM & GRU cell backwards,
// and the lstm/gru/rnn_tanh/rnn_relu entry points. Each RNN op has an `input`
// overload (padded batch, with batch_first) and a `data` overload (packed
// sequence carrying explicit batch_sizes). All forward directly to at::native.
// ---------------------------------------------------------------------------
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd___thnn_fused_lstm_cell_backward(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
    // No device check
  // DeviceGuard omitted
  return at::native::_thnn_fused_lstm_cell_backward(grad_hy, grad_cy, cx, cy, workspace, has_bias);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd___thnn_differentiable_lstm_cell_backward(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, const at::Tensor & cx, const at::Tensor & cy) {
    // No device check
  // DeviceGuard omitted
  return at::native::_thnn_differentiable_lstm_cell_backward(grad_hy, grad_cy, input_gates, hidden_gates, input_bias, hidden_bias, cx, cy);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd___thnn_differentiable_gru_cell_backward(const at::Tensor & grad_hy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias) {
    // No device check
  // DeviceGuard omitted
  return at::native::_thnn_differentiable_gru_cell_backward(grad_hy, input_gates, hidden_gates, hx, input_bias, hidden_bias);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_input_lstm(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
    // No device check
  // DeviceGuard omitted
  return at::native::lstm(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_data_lstm(const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
    // No device check
  // DeviceGuard omitted
  return at::native::lstm(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_input_gru(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
    // No device check
  // DeviceGuard omitted
  return at::native::gru(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_data_gru(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
    // No device check
  // DeviceGuard omitted
  return at::native::gru(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_input_rnn_tanh(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
    // No device check
  // DeviceGuard omitted
  return at::native::rnn_tanh(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_data_rnn_tanh(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
    // No device check
  // DeviceGuard omitted
  return at::native::rnn_tanh(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_input_rnn_relu(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
    // No device check
  // DeviceGuard omitted
  return at::native::rnn_relu(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_data_rnn_relu(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
    // No device check
  // DeviceGuard omitted
  return at::native::rnn_relu(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
}
} // anonymous namespace
// ---------------------------------------------------------------------------
// Single-step RNN cells (lstm_cell/gru_cell/rnn_tanh_cell/rnn_relu_cell) and
// their quantized counterparts. The quantized variants additionally thread
// the prepacked weights, column offsets, scales and zero points straight
// through to the at::native kernels.
// ---------------------------------------------------------------------------
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__lstm_cell(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
    // No device check
  // DeviceGuard omitted
  return at::native::lstm_cell(input, hx, w_ih, w_hh, b_ih, b_hh);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__gru_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
    // No device check
  // DeviceGuard omitted
  return at::native::gru_cell(input, hx, w_ih, w_hh, b_ih, b_hh);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__rnn_tanh_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
    // No device check
  // DeviceGuard omitted
  return at::native::rnn_tanh_cell(input, hx, w_ih, w_hh, b_ih, b_hh);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__rnn_relu_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
    // No device check
  // DeviceGuard omitted
  return at::native::rnn_relu_cell(input, hx, w_ih, w_hh, b_ih, b_hh);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__quantized_lstm_cell(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
    // No device check
  // DeviceGuard omitted
  return at::native::quantized_lstm_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__quantized_gru_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
    // No device check
  // DeviceGuard omitted
  return at::native::quantized_gru_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__quantized_rnn_relu_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
    // No device check
  // DeviceGuard omitted
  return at::native::quantized_rnn_relu_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__quantized_rnn_tanh_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
    // No device check
  // DeviceGuard omitted
  return at::native::quantized_rnn_tanh_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}
} // anonymous namespace
// ---------------------------------------------------------------------------
// Packed-sequence helpers and set_. NOTE: _pack_padded_sequence_backward and
// set_ take SymInt arguments and therefore dispatch to the *_symint native
// entry points (symbolic sizes/strides are forwarded unconverted).
// ---------------------------------------------------------------------------
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___pack_padded_sequence_backward(const at::Tensor & grad, c10::SymIntArrayRef input_size, const at::Tensor & batch_sizes, bool batch_first) {
    // No device check
  // DeviceGuard omitted
  return at::native::_pack_padded_sequence_backward_symint(grad, input_size, batch_sizes, batch_first);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd___pad_packed_sequence(const at::Tensor & data, const at::Tensor & batch_sizes, bool batch_first, const at::Scalar & padding_value, int64_t total_length) {
    // No device check
  // DeviceGuard omitted
  return at::native::_pad_packed_sequence(data, batch_sizes, batch_first, padding_value, total_length);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_source_Tensor_storage_offset_set_(at::Tensor & self, const at::Tensor & source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
    // No device check
  // DeviceGuard omitted
  return at::native::set__symint(self, source, storage_offset, size, stride);
}
} // anonymous namespace
// ---------------------------------------------------------------------------
// Dimname (named-dimension) overloads of index_add, index_fill (in-place and
// out-of-place, Scalar and Tensor value variants), scatter and scatter_add.
// Each forwards to the identically-named at::native overload; the native
// overload is selected by the argument types.
// ---------------------------------------------------------------------------
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dimname_index_add(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
    // No device check
  // DeviceGuard omitted
  return at::native::index_add(self, dim, index, source, alpha);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Dimname_Scalar_index_fill_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
    // No device check
  // DeviceGuard omitted
  return at::native::index_fill_(self, dim, index, value);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Dimname_Scalar_index_fill(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
    // No device check
  // DeviceGuard omitted
  return at::native::index_fill(self, dim, index, value);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Dimname_Tensor_index_fill_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) {
    // No device check
  // DeviceGuard omitted
  return at::native::index_fill_(self, dim, index, value);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Dimname_Tensor_index_fill(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) {
    // No device check
  // DeviceGuard omitted
  return at::native::index_fill(self, dim, index, value);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dimname_src_scatter(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) {
    // No device check
  // DeviceGuard omitted
  return at::native::scatter(self, dim, index, src);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dimname_value_scatter(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
    // No device check
  // DeviceGuard omitted
  return at::native::scatter(self, dim, index, value);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dimname_scatter_add(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) {
    // No device check
  // DeviceGuard omitted
  return at::native::scatter_add(self, dim, index, src);
}
} // anonymous namespace
// ---------------------------------------------------------------------------
// Bitwise AND / OR / XOR wrappers: the named bitwise_* forms and the
// operator forms (__and__/__iand__, __or__/__ior__, __xor__/__ixor__), each
// in Scalar and Tensor overloads. The trailing-underscore / __i*__ variants
// take `self` by mutable reference (in-place) and return `at::Tensor &`.
// ---------------------------------------------------------------------------
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_bitwise_and_(at::Tensor & self, const at::Scalar & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::bitwise_and_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Scalar___and__(const at::Tensor & self, const at::Scalar & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::__and__(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar___iand__(at::Tensor & self, const at::Scalar & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::__iand__(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor___and__(const at::Tensor & self, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::__and__(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor___iand__(at::Tensor & self, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::__iand__(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Scalar_bitwise_or(const at::Tensor & self, const at::Scalar & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::bitwise_or(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_bitwise_or_(at::Tensor & self, const at::Scalar & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::bitwise_or_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Scalar___or__(const at::Tensor & self, const at::Scalar & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::__or__(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar___ior__(at::Tensor & self, const at::Scalar & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::__ior__(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor___or__(const at::Tensor & self, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::__or__(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor___ior__(at::Tensor & self, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::__ior__(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Scalar_bitwise_xor(const at::Tensor & self, const at::Scalar & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::bitwise_xor(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_bitwise_xor_(at::Tensor & self, const at::Scalar & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::bitwise_xor_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Scalar___xor__(const at::Tensor & self, const at::Scalar & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::__xor__(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar___ixor__(at::Tensor & self, const at::Scalar & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::__ixor__(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor___xor__(const at::Tensor & self, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::__xor__(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor___ixor__(at::Tensor & self, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::__ixor__(self, other);
}
} // anonymous namespace
// Generated thunks for diag / cross / trace_backward. Straight pass-through
// to at::native:: kernels; no device check or guard at this dispatch key.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__diag(const at::Tensor & self, int64_t diagonal) {
    // No device check
  // DeviceGuard omitted
  return at::native::diag(self, diagonal);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_diag_out(const at::Tensor & self, int64_t diagonal, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::diag_out(self, diagonal, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__cross(const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::cross(self, other, dim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_cross_out(const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::cross_out(self, other, dim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__trace_backward(const at::Tensor & grad, c10::SymIntArrayRef sizes) {
    // No device check
  // DeviceGuard omitted
  // Takes SymInt sizes, so it forwards to the *_symint native entry point.
  return at::native::trace_backward_symint(grad, sizes);
}
} // anonymous namespace
// Generated thunks for the comparison alias operators (not_equal == ne,
// greater_equal == ge, less_equal == le, greater == gt, less == lt).
// Each op has six wrappers: Scalar/Tensor overloads x {functional, out, in-place}.
// All forward unchanged to the matching at::native:: alias implementation.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Scalar_not_equal(const at::Tensor & self, const at::Scalar & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::not_equal(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_out_not_equal_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::not_equal_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_not_equal_(at::Tensor & self, const at::Scalar & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::not_equal_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_not_equal(const at::Tensor & self, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::not_equal(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_out_not_equal_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::not_equal_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_not_equal_(at::Tensor & self, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::not_equal_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Scalar_greater_equal(const at::Tensor & self, const at::Scalar & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::greater_equal(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_out_greater_equal_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::greater_equal_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_greater_equal_(at::Tensor & self, const at::Scalar & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::greater_equal_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_greater_equal(const at::Tensor & self, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::greater_equal(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_out_greater_equal_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::greater_equal_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_greater_equal_(at::Tensor & self, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::greater_equal_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Scalar_less_equal(const at::Tensor & self, const at::Scalar & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::less_equal(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_out_less_equal_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::less_equal_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_less_equal_(at::Tensor & self, const at::Scalar & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::less_equal_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_less_equal(const at::Tensor & self, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::less_equal(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_out_less_equal_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::less_equal_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_less_equal_(at::Tensor & self, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::less_equal_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Scalar_greater(const at::Tensor & self, const at::Scalar & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::greater(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_out_greater_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::greater_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_greater_(at::Tensor & self, const at::Scalar & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::greater_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_greater(const at::Tensor & self, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::greater(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_out_greater_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::greater_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_greater_(at::Tensor & self, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::greater_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Scalar_less(const at::Tensor & self, const at::Scalar & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::less(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_out_less_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::less_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_less_(at::Tensor & self, const at::Scalar & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::less_(self, other);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_less(const at::Tensor & self, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::less(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_out_less_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::less_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_less_(at::Tensor & self, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::less_(self, other);
}
} // anonymous namespace
// Generated thunks for indexing/gather-style composite ops. Wrappers taking
// c10::SymIntArrayRef / c10::SymInt forward to the *_symint native entry points.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__take_along_dim(const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::take_along_dim(self, indices, dim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_take_along_dim_out(const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::take_along_dim_out(self, indices, dim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dimname_index_select(const at::Tensor & self, at::Dimname dim, const at::Tensor & index) {
    // No device check
  // DeviceGuard omitted
  return at::native::index_select(self, dim, index);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_dimname_out_index_select_out(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::index_select_out(self, dim, index, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__index_select_backward(const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index) {
    // No device check
  // DeviceGuard omitted
  // SymInt sizes -> *_symint native entry point.
  return at::native::index_select_backward_symint(grad, self_sizes, dim, index);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__masked_select_backward(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & mask) {
    // No device check
  // DeviceGuard omitted
  return at::native::masked_select_backward(grad, input, mask);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd__nonzero_numpy(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::nonzero_numpy(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__argwhere(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::argwhere(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__gather_backward(const at::Tensor & grad, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
    // No device check
  // DeviceGuard omitted
  return at::native::gather_backward(grad, self, dim, index, sparse_grad);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dimname_gather(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad) {
    // No device check
  // DeviceGuard omitted
  return at::native::gather(self, dim, index, sparse_grad);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_dimname_out_gather_out(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::gather_out(self, dim, index, sparse_grad, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___gather_sparse_backward(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & grad) {
    // No device check
  // DeviceGuard omitted
  return at::native::_gather_sparse_backward(self, dim, index, grad);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__cross_entropy_loss(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, double label_smoothing) {
    // No device check
  // DeviceGuard omitted
  // SymInt ignore_index -> *_symint native entry point.
  return at::native::cross_entropy_loss_symint(self, target, weight, reduction, ignore_index, label_smoothing);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_vander(const at::Tensor & x, c10::optional<int64_t> N) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_vander(x, N);
}
} // anonymous namespace
// Generated thunks for (mostly legacy/alias) linear-algebra and transpose ops:
// svd, swapaxes/swapdims (transpose aliases), qr, orgqr, lu, arctan2 (atan2 alias).
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__svd(const at::Tensor & self, bool some, bool compute_uv) {
    // No device check
  // DeviceGuard omitted
  return at::native::svd(self, some, compute_uv);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_U_svd_out(const at::Tensor & self, bool some, bool compute_uv, at::Tensor & U, at::Tensor & S, at::Tensor & V) {
    // No device check
  // DeviceGuard omitted
  return at::native::svd_out(self, some, compute_uv, U, S, V);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__swapaxes(const at::Tensor & self, int64_t axis0, int64_t axis1) {
    // No device check
  // DeviceGuard omitted
  return at::native::swapaxes(self, axis0, axis1);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__swapaxes_(at::Tensor & self, int64_t axis0, int64_t axis1) {
    // No device check
  // DeviceGuard omitted
  return at::native::swapaxes_(self, axis0, axis1);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__swapdims(const at::Tensor & self, int64_t dim0, int64_t dim1) {
    // No device check
  // DeviceGuard omitted
  return at::native::swapdims(self, dim0, dim1);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__swapdims_(at::Tensor & self, int64_t dim0, int64_t dim1) {
    // No device check
  // DeviceGuard omitted
  return at::native::swapdims_(self, dim0, dim1);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__qr(const at::Tensor & self, bool some) {
    // No device check
  // DeviceGuard omitted
  return at::native::qr(self, some);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_Q_qr_out(const at::Tensor & self, bool some, at::Tensor & Q, at::Tensor & R) {
    // No device check
  // DeviceGuard omitted
  return at::native::qr_out(self, some, Q, R);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__orgqr(const at::Tensor & self, const at::Tensor & input2) {
    // No device check
  // DeviceGuard omitted
  return at::native::orgqr(self, input2);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_orgqr_out(const at::Tensor & self, const at::Tensor & input2, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::orgqr_out(self, input2, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd___lu_with_info(const at::Tensor & self, bool pivot, bool check_errors) {
    // No device check
  // DeviceGuard omitted
  return at::native::_lu_with_info(self, pivot, check_errors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__lu_solve(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) {
    // No device check
  // DeviceGuard omitted
  return at::native::lu_solve(self, LU_data, LU_pivots);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_lu_solve_out(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::lu_solve_out(self, LU_data, LU_pivots, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__arctan2(const at::Tensor & self, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::arctan2(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_arctan2_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::arctan2_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd__arctan2_(at::Tensor & self, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::arctan2_(self, other);
}
} // anonymous namespace
// Generated thunks for histogramdd (three bin-spec overloads), binary
// max/min, and the quantile/nanquantile families (Tensor-q and double-q
// overloads, each with functional and out variants).
namespace {
::std::tuple<at::Tensor,::std::vector<at::Tensor>> wrapper_CompositeImplicitAutograd__histogramdd(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
    // No device check
  // DeviceGuard omitted
  return at::native::histogramdd(self, bins, range, weight, density);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,::std::vector<at::Tensor>> wrapper_CompositeImplicitAutograd_int_bins_histogramdd(const at::Tensor & self, int64_t bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
    // No device check
  // DeviceGuard omitted
  return at::native::histogramdd(self, bins, range, weight, density);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,::std::vector<at::Tensor>> wrapper_CompositeImplicitAutograd_TensorList_bins_histogramdd(const at::Tensor & self, at::TensorList bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
    // No device check
  // DeviceGuard omitted
  return at::native::histogramdd(self, bins, range, weight, density);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_other_max(const at::Tensor & self, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::max(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_max_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::max_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_other_min(const at::Tensor & self, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::min(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_min_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::min_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__quantile(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
    // No device check
  // DeviceGuard omitted
  return at::native::quantile(self, q, dim, keepdim, interpolation);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_quantile_out(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::quantile_out(self, q, dim, keepdim, interpolation, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_scalar_quantile(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
    // No device check
  // DeviceGuard omitted
  return at::native::quantile(self, q, dim, keepdim, interpolation);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_scalar_out_quantile_out(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::quantile_out(self, q, dim, keepdim, interpolation, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__nanquantile(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
    // No device check
  // DeviceGuard omitted
  return at::native::nanquantile(self, q, dim, keepdim, interpolation);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_nanquantile_out(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::nanquantile_out(self, q, dim, keepdim, interpolation, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_scalar_nanquantile(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
    // No device check
  // DeviceGuard omitted
  return at::native::nanquantile(self, q, dim, keepdim, interpolation);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_scalar_out_nanquantile_out(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::nanquantile_out(self, q, dim, keepdim, interpolation, out);
}
} // anonymous namespace
// Generated thunks for the sort family: named-dimension (Dimname) sort
// overloads, msort, and argsort.
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_dimname_sort(const at::Tensor & self, at::Dimname dim, bool descending) {
    // No device check
  // DeviceGuard omitted
  return at::native::sort(self, dim, descending);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_dimname_values_sort_out(const at::Tensor & self, at::Dimname dim, bool descending, at::Tensor & values, at::Tensor & indices) {
    // No device check
  // DeviceGuard omitted
  return at::native::sort_out(self, dim, descending, values, indices);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd_dimname_stable_sort(const at::Tensor & self, c10::optional<bool> stable, at::Dimname dim, bool descending) {
    // No device check
  // DeviceGuard omitted
  return at::native::sort(self, stable, dim, descending);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_dimname_values_stable_sort_out(const at::Tensor & self, c10::optional<bool> stable, at::Dimname dim, bool descending, at::Tensor & values, at::Tensor & indices) {
    // No device check
  // DeviceGuard omitted
  return at::native::sort_out(self, stable, dim, descending, values, indices);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__msort(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::msort(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_msort_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::msort_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__argsort(const at::Tensor & self, int64_t dim, bool descending) {
    // No device check
  // DeviceGuard omitted
  return at::native::argsort(self, dim, descending);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_dimname_argsort(const at::Tensor & self, at::Dimname dim, bool descending) {
    // No device check
  // DeviceGuard omitted
  return at::native::argsort(self, dim, descending);
}
} // anonymous namespace
// Generated thunks for float_power: Tensor^Tensor, Scalar^Tensor and
// Tensor^Scalar overloads, each with functional / out / in-place variants.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_Tensor_float_power(const at::Tensor & self, const at::Tensor & exponent) {
    // No device check
  // DeviceGuard omitted
  return at::native::float_power(self, exponent);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_Tensor_out_float_power_out(const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::float_power_out(self, exponent, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_float_power_(at::Tensor & self, const at::Tensor & exponent) {
    // No device check
  // DeviceGuard omitted
  return at::native::float_power_(self, exponent);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Scalar_float_power(const at::Scalar & self, const at::Tensor & exponent) {
    // No device check
  // DeviceGuard omitted
  return at::native::float_power(self, exponent);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_out_float_power_out(const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::float_power_out(self, exponent, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_Tensor_Scalar_float_power(const at::Tensor & self, const at::Scalar & exponent) {
    // No device check
  // DeviceGuard omitted
  return at::native::float_power(self, exponent);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Tensor_Scalar_out_float_power_out(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::float_power_out(self, exponent, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_Scalar_float_power_(at::Tensor & self, const at::Scalar & exponent) {
    // No device check
  // DeviceGuard omitted
  return at::native::float_power_(self, exponent);
}
} // anonymous namespace
// [Generated] CompositeImplicitAutograd wrappers for loss operators.
// nll_loss / nll_loss_nd / nll_loss2d take ignore_index as a c10::SymInt and
// call the *_symint native variants; the *_out variants materialize the
// SymInt via .expect_int() because the native out overloads take a concrete
// int64_t.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__l1_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
    // No device check
  // DeviceGuard omitted
  return at::native::l1_loss(self, target, reduction);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__multilabel_margin_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
    // No device check
  // DeviceGuard omitted
  return at::native::multilabel_margin_loss(self, target, reduction);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_multilabel_margin_loss_out(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::multilabel_margin_loss_out(self, target, reduction, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__nll_loss(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
    // No device check
  // DeviceGuard omitted
  return at::native::nll_loss_symint(self, target, weight, reduction, ignore_index);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_nll_loss_out(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  // expect_int(): the out= native kernel has no symint overload.
  return at::native::nll_loss_out(self, target, weight, reduction, ignore_index.expect_int(), out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__nll_loss_nd(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
    // No device check
  // DeviceGuard omitted
  return at::native::nll_loss_nd_symint(self, target, weight, reduction, ignore_index);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__nll_loss2d(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
    // No device check
  // DeviceGuard omitted
  return at::native::nll_loss2d_symint(self, target, weight, reduction, ignore_index);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_nll_loss2d_out(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  // expect_int(): the out= native kernel has no symint overload.
  return at::native::nll_loss2d_out(self, target, weight, reduction, ignore_index.expect_int(), out);
}
} // anonymous namespace
// [Generated] CompositeImplicitAutograd wrappers for aten::log_sigmoid and
// its out= variant, forwarding to at::native.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__log_sigmoid(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::log_sigmoid(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_log_sigmoid_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::log_sigmoid_out(self, out);
}
} // anonymous namespace
// [Generated] CompositeImplicitAutograd wrappers for adaptive average pooling
// and padding. These ops are SymInt-aware end to end: sizes/pads arrive as
// c10::SymIntArrayRef and are forwarded to the *_symint native variants
// without materialization.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__adaptive_avg_pool2d(const at::Tensor & self, c10::SymIntArrayRef output_size) {
    // No device check
  // DeviceGuard omitted
  return at::native::adaptive_avg_pool2d_symint(self, output_size);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__adaptive_avg_pool3d(const at::Tensor & self, c10::SymIntArrayRef output_size) {
    // No device check
  // DeviceGuard omitted
  return at::native::adaptive_avg_pool3d_symint(self, output_size);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___pad_circular(const at::Tensor & self, c10::SymIntArrayRef pad) {
    // No device check
  // DeviceGuard omitted
  return at::native::_pad_circular_symint(self, pad);
}
} // anonymous namespace
namespace {
// Padding mode given as an integer enum.
at::Tensor wrapper_CompositeImplicitAutograd___pad_enum(const at::Tensor & self, c10::SymIntArrayRef pad, int64_t mode, c10::optional<double> value) {
    // No device check
  // DeviceGuard omitted
  return at::native::_pad_enum_symint(self, pad, mode, value);
}
} // anonymous namespace
namespace {
// Padding mode given as a string (public aten::pad entry point).
at::Tensor wrapper_CompositeImplicitAutograd__pad(const at::Tensor & self, c10::SymIntArrayRef pad, c10::string_view mode, c10::optional<double> value) {
    // No device check
  // DeviceGuard omitted
  return at::native::pad_symint(self, pad, mode, value);
}
} // anonymous namespace
// [Generated] CompositeImplicitAutograd wrappers for the upsample '.vec'
// overloads (output_size plus scale_factors). The optional symbolic
// output_size is materialized for the native kernels: if present, each
// SymInt is converted to a concrete int via C10_AS_INTARRAYREF_SLOW and
// re-wrapped in an optional; otherwise c10::nullopt is passed through.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_vec_upsample_linear1d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
    // No device check
  // DeviceGuard omitted
  return at::native::upsample_linear1d(input, output_size.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*output_size)) : c10::nullopt, align_corners, scale_factors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_vec_upsample_bilinear2d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
    // No device check
  // DeviceGuard omitted
  return at::native::upsample_bilinear2d(input, output_size.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*output_size)) : c10::nullopt, align_corners, scale_factors);
}
} // anonymous namespace
namespace {
// '_aa' = anti-aliased variant.
at::Tensor wrapper_CompositeImplicitAutograd_vec__upsample_bilinear2d_aa(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
    // No device check
  // DeviceGuard omitted
  return at::native::_upsample_bilinear2d_aa(input, output_size.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*output_size)) : c10::nullopt, align_corners, scale_factors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_vec_upsample_trilinear3d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
    // No device check
  // DeviceGuard omitted
  return at::native::upsample_trilinear3d(input, output_size.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*output_size)) : c10::nullopt, align_corners, scale_factors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_vec_upsample_bicubic2d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
    // No device check
  // DeviceGuard omitted
  return at::native::upsample_bicubic2d(input, output_size.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*output_size)) : c10::nullopt, align_corners, scale_factors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_vec__upsample_bicubic2d_aa(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
    // No device check
  // DeviceGuard omitted
  return at::native::_upsample_bicubic2d_aa(input, output_size.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*output_size)) : c10::nullopt, align_corners, scale_factors);
}
} // anonymous namespace
namespace {
// Nearest-neighbor variants: no align_corners parameter.
at::Tensor wrapper_CompositeImplicitAutograd_vec_upsample_nearest1d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
    // No device check
  // DeviceGuard omitted
  return at::native::upsample_nearest1d(input, output_size.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*output_size)) : c10::nullopt, scale_factors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_vec__upsample_nearest_exact1d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
    // No device check
  // DeviceGuard omitted
  return at::native::_upsample_nearest_exact1d(input, output_size.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*output_size)) : c10::nullopt, scale_factors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_vec_upsample_nearest2d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
    // No device check
  // DeviceGuard omitted
  return at::native::upsample_nearest2d(input, output_size.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*output_size)) : c10::nullopt, scale_factors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_vec__upsample_nearest_exact2d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
    // No device check
  // DeviceGuard omitted
  return at::native::_upsample_nearest_exact2d(input, output_size.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*output_size)) : c10::nullopt, scale_factors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_vec_upsample_nearest3d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
    // No device check
  // DeviceGuard omitted
  return at::native::upsample_nearest3d(input, output_size.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*output_size)) : c10::nullopt, scale_factors);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_vec__upsample_nearest_exact3d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
    // No device check
  // DeviceGuard omitted
  return at::native::_upsample_nearest_exact3d(input, output_size.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*output_size)) : c10::nullopt, scale_factors);
}
} // anonymous namespace
// [Generated] CompositeImplicitAutograd wrappers for thnn_conv2d and
// slow_conv3d. slow_conv3d takes symbolic padding (SymIntArrayRef) and
// materializes it with C10_AS_INTARRAYREF_SLOW because the native kernel
// takes a concrete IntArrayRef.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__thnn_conv2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
    // No device check
  // DeviceGuard omitted
  return at::native::thnn_conv2d(self, weight, kernel_size, bias, stride, padding);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_thnn_conv2d_out(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::thnn_conv2d_out(self, weight, kernel_size, bias, stride, padding, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__slow_conv3d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding) {
    // No device check
  // DeviceGuard omitted
  return at::native::slow_conv3d(self, weight, kernel_size, bias, stride, C10_AS_INTARRAYREF_SLOW(padding));
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_slow_conv3d_out(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::slow_conv3d_out(self, weight, kernel_size, bias, stride, C10_AS_INTARRAYREF_SLOW(padding), out);
}
} // anonymous namespace
// [Generated] CompositeImplicitAutograd wrappers: column_stack (+ out=),
// isfinite, and the functorch-related _add_batch_dim / _remove_batch_dim
// helpers. All forward directly to at::native.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__column_stack(at::TensorList tensors) {
    // No device check
  // DeviceGuard omitted
  return at::native::column_stack(tensors);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_column_stack_out(at::TensorList tensors, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::column_stack_out(tensors, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__isfinite(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::isfinite(self);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___add_batch_dim(const at::Tensor & self, int64_t batch_dim, int64_t level) {
    // No device check
  // DeviceGuard omitted
  return at::native::_add_batch_dim(self, batch_dim, level);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___remove_batch_dim(const at::Tensor & self, int64_t level, int64_t batch_size, int64_t out_dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::_remove_batch_dim(self, level, batch_size, out_dim);
}
} // anonymous namespace
// [Generated] CompositeImplicitAutograd wrappers for the torch.special
// namespace. Most are aliases for base aten ops (e.g. special_expm1,
// special_erf); each wrapper forwards to the like-named at::native function,
// with out= variants beside the functional ones where the op has them.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_expm1(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_expm1(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_expm1_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_expm1_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_exp2(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_exp2(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_exp2_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_exp2_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_psi(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_psi(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_psi_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_psi_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_digamma(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_digamma(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_digamma_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_digamma_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_gammaln(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_gammaln(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_gammaln_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_gammaln_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_erf(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_erf(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_erf_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_erf_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_erfc(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_erfc(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_erfc_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_erfc_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_erfinv(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_erfinv(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_erfinv_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_erfinv_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_ndtr(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_ndtr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_ndtr_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_ndtr_out(self, out);
}
} // anonymous namespace
// special_xlogy overload family: tensor/tensor, scalar self, scalar other,
// each with a matching out= wrapper.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_xlogy(const at::Tensor & self, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_xlogy(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_xlogy_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_xlogy_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_self_scalar_special_xlogy(const at::Scalar & self, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_xlogy(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_self_scalar_out_special_xlogy_out(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_xlogy_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_other_scalar_special_xlogy(const at::Tensor & self, const at::Scalar & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_xlogy(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_other_scalar_out_special_xlogy_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_xlogy_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_i0(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_i0(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_i0_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_i0_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_logit(const at::Tensor & self, c10::optional<double> eps) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_logit(self, eps);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_logit_out(const at::Tensor & self, c10::optional<double> eps, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_logit_out(self, eps, out);
}
} // anonymous namespace
namespace {
// Note: argument order (n, self) mirrors the aten::special_polygamma schema.
at::Tensor wrapper_CompositeImplicitAutograd__special_polygamma(int64_t n, const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_polygamma(n, self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_polygamma_out(int64_t n, const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_polygamma_out(n, self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_logsumexp(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_logsumexp(self, dim, keepdim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_logsumexp_out(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_logsumexp_out(self, dim, keepdim, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_expit(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_expit(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_expit_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_expit_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_sinc(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_sinc(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_sinc_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_sinc_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_round(const at::Tensor & self, int64_t decimals) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_round(self, decimals);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_round_out(const at::Tensor & self, int64_t decimals, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_round_out(self, decimals, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_log1p(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_log1p(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_log1p_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_log1p_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_log_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_log_softmax(self, dim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_gammainc(const at::Tensor & self, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_gammainc(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_gammainc_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_gammainc_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_gammaincc(const at::Tensor & self, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_gammaincc(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_gammaincc_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_gammaincc_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_multigammaln(const at::Tensor & self, int64_t p) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_multigammaln(self, p);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_special_multigammaln_out(const at::Tensor & self, int64_t p, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_multigammaln_out(self, p, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__special_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
    // No device check
  // DeviceGuard omitted
  return at::native::special_softmax(self, dim, dtype);
}
} // anonymous namespace
// [Generated] CompositeImplicitAutograd wrappers for 1-D FFT ops
// (torch.fft.fft / ifft / rfft / irfft and their out= variants).
// 'n' is the optional signal length, 'dim' the transform dimension, and
// 'norm' the optional normalization-mode string; all are forwarded verbatim
// to at::native.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_fft(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_fft(self, n, dim, norm);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_fft_out(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_fft_out(self, n, dim, norm, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_ifft(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_ifft(self, n, dim, norm);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_ifft_out(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_ifft_out(self, n, dim, norm, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_rfft(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_rfft(self, n, dim, norm);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_rfft_out(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_rfft_out(self, n, dim, norm, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_irfft(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_irfft(self, n, dim, norm);
}
} // anonymous namespace
6062 | namespace { |
6063 | at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_irfft_out(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) { |
6064 | // No device check |
6065 | // DeviceGuard omitted |
6066 | return at::native::fft_irfft_out(self, n, dim, norm, out); |
6067 | } |
6068 | } // anonymous namespace |
6069 | namespace { |
6070 | at::Tensor wrapper_CompositeImplicitAutograd__fft_hfft(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) { |
6071 | // No device check |
6072 | // DeviceGuard omitted |
6073 | return at::native::fft_hfft(self, n, dim, norm); |
6074 | } |
6075 | } // anonymous namespace |
6076 | namespace { |
6077 | at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_hfft_out(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) { |
6078 | // No device check |
6079 | // DeviceGuard omitted |
6080 | return at::native::fft_hfft_out(self, n, dim, norm, out); |
6081 | } |
6082 | } // anonymous namespace |
6083 | namespace { |
6084 | at::Tensor wrapper_CompositeImplicitAutograd__fft_ihfft(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) { |
6085 | // No device check |
6086 | // DeviceGuard omitted |
6087 | return at::native::fft_ihfft(self, n, dim, norm); |
6088 | } |
6089 | } // anonymous namespace |
6090 | namespace { |
6091 | at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_ihfft_out(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) { |
6092 | // No device check |
6093 | // DeviceGuard omitted |
6094 | return at::native::fft_ihfft_out(self, n, dim, norm, out); |
6095 | } |
6096 | } // anonymous namespace |
// Generated glue (torchgen): 2-D FFT wrappers (fft2, ifft2, rfft2, irfft2,
// hfft2, ihfft2 and their .out overloads) for CompositeImplicitAutograd.
// Each forwards (self, s, dim, norm[, out]) verbatim to at::native.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_fft2(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_fft2(self, s, dim, norm);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_fft2_out(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_fft2_out(self, s, dim, norm, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_ifft2(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_ifft2(self, s, dim, norm);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_ifft2_out(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_ifft2_out(self, s, dim, norm, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_rfft2(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_rfft2(self, s, dim, norm);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_rfft2_out(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_rfft2_out(self, s, dim, norm, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_irfft2(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_irfft2(self, s, dim, norm);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_irfft2_out(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_irfft2_out(self, s, dim, norm, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_hfft2(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_hfft2(self, s, dim, norm);
}
} // anonymous namespace
// NOTE: unlike the other fft .out wrappers above, the .out overloads of
// fft_hfft2 and fft_ihfft2 take and return `const at::Tensor &` — this
// matches the signature of the at::native functions they forward to.
namespace {
const at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_hfft2_out(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_hfft2_out(self, s, dim, norm, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_ihfft2(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_ihfft2(self, s, dim, norm);
}
} // anonymous namespace
namespace {
const at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_ihfft2_out(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_ihfft2_out(self, s, dim, norm, out);
}
} // anonymous namespace
// Generated glue (torchgen): N-D FFT wrappers (fftn, ifftn, rfftn, irfftn,
// hfftn, ihfftn and their .out overloads) for CompositeImplicitAutograd.
// Same shape as the 2-D group, but `dim` is an OptionalIntArrayRef here.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_fftn(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_fftn(self, s, dim, norm);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_fftn_out(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_fftn_out(self, s, dim, norm, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_ifftn(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_ifftn(self, s, dim, norm);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_ifftn_out(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_ifftn_out(self, s, dim, norm, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_rfftn(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_rfftn(self, s, dim, norm);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_rfftn_out(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_rfftn_out(self, s, dim, norm, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_irfftn(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_irfftn(self, s, dim, norm);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_irfftn_out(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_irfftn_out(self, s, dim, norm, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_hfftn(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_hfftn(self, s, dim, norm);
}
} // anonymous namespace
// NOTE: as with hfft2/ihfft2, the .out overloads of fft_hfftn and fft_ihfftn
// take and return `const at::Tensor &`, matching their native signatures.
namespace {
const at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_hfftn_out(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_hfftn_out(self, s, dim, norm, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_ihfftn(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_ihfftn(self, s, dim, norm);
}
} // anonymous namespace
namespace {
const at::Tensor & wrapper_CompositeImplicitAutograd_out_fft_ihfftn_out(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_ihfftn_out(self, s, dim, norm, out);
}
} // anonymous namespace
// Generated glue (torchgen): fft_fftshift / fft_ifftshift wrappers for the
// CompositeImplicitAutograd key; forward (self, dim) verbatim to at::native.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_fftshift(const at::Tensor & self, at::OptionalIntArrayRef dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_fftshift(self, dim);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__fft_ifftshift(const at::Tensor & self, at::OptionalIntArrayRef dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::fft_ifftshift(self, dim);
}
} // anonymous namespace
// Generated glue (torchgen): linalg factorization / determinant wrappers for
// CompositeImplicitAutograd — linalg_cholesky(.out), linalg_lu_factor(.out),
// linalg_det(.out), the legacy `det` alias, and linalg_ldl_factor(.out).
// Each forwards its arguments verbatim to at::native; the tuple-returning
// factorizations return ::std::tuple of Tensors (or Tensor& for .out).
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_cholesky(const at::Tensor & self, bool upper) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_cholesky(self, upper);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_cholesky_out(const at::Tensor & self, bool upper, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_cholesky_out(self, upper, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__linalg_lu_factor(const at::Tensor & A, bool pivot) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_lu_factor(A, pivot);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_out_linalg_lu_factor_out(const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_lu_factor_out(A, pivot, LU, pivots);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_det(const at::Tensor & A) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_det(A);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_det_out(const at::Tensor & A, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_det_out(A, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__det(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::det(self);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__linalg_ldl_factor(const at::Tensor & self, bool hermitian) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_ldl_factor(self, hermitian);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_out_linalg_ldl_factor_out(const at::Tensor & self, bool hermitian, at::Tensor & LD, at::Tensor & pivots) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_ldl_factor_out(self, hermitian, LD, pivots);
}
} // anonymous namespace
// Generated glue (torchgen): linalg product / log-determinant wrappers for
// CompositeImplicitAutograd — linalg_matmul(.out), linalg_vecdot(.out),
// linalg_slogdet(.out), the legacy `slogdet`(.out) aliases, and `logdet`.
// Each forwards its arguments verbatim to at::native.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_matmul(const at::Tensor & self, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_matmul(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_matmul_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_matmul_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_vecdot(const at::Tensor & x, const at::Tensor & y, int64_t dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_vecdot(x, y, dim);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_vecdot_out(const at::Tensor & x, const at::Tensor & y, int64_t dim, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_vecdot_out(x, y, dim, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__linalg_slogdet(const at::Tensor & A) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_slogdet(A);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_out_linalg_slogdet_out(const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_slogdet_out(A, sign, logabsdet);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__slogdet(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::slogdet(self);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_out_slogdet_out(const at::Tensor & self, at::Tensor & sign, at::Tensor & logabsdet) {
    // No device check
  // DeviceGuard omitted
  return at::native::slogdet_out(self, sign, logabsdet);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__logdet(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::logdet(self);
}
} // anonymous namespace
// Generated glue (torchgen): eigenvalue / inverse wrappers for
// CompositeImplicitAutograd — linalg_eigvals(.out), linalg_eigh(.out),
// linalg_eigvalsh(.out), linalg_inv(.out), and the legacy `inverse`(.out)
// aliases. Each forwards its arguments verbatim to at::native.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_eigvals(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_eigvals(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_eigvals_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_eigvals_out(self, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__linalg_eigh(const at::Tensor & self, c10::string_view UPLO) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_eigh(self, UPLO);
}
} // anonymous namespace
// NOTE: the overload tag here is "eigvals" (from the schema's named out
// arguments eigvals/eigvecs), not "out" like the neighbouring wrappers.
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_eigvals_linalg_eigh_out(const at::Tensor & self, c10::string_view UPLO, at::Tensor & eigvals, at::Tensor & eigvecs) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_eigh_out(self, UPLO, eigvals, eigvecs);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_eigvalsh(const at::Tensor & self, c10::string_view UPLO) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_eigvalsh(self, UPLO);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_eigvalsh_out(const at::Tensor & self, c10::string_view UPLO, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_eigvalsh_out(self, UPLO, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_inv(const at::Tensor & A) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_inv(A);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_inv_out(const at::Tensor & A, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_inv_out(A, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__inverse(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::inverse(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_inverse_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::inverse_out(self, out);
}
} // anonymous namespace
// Generated glue (torchgen): tensor-product and vector/matrix norm wrappers
// for CompositeImplicitAutograd — inner(.out), outer(.out), the legacy
// `ger`(.out) aliases, and linalg_norm(.out) in both its scalar-`ord` and
// string-`ord` ("ord_str") overloads. Each forwards verbatim to at::native.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__inner(const at::Tensor & self, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::inner(self, other);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_inner_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::inner_out(self, other, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__outer(const at::Tensor & self, const at::Tensor & vec2) {
    // No device check
  // DeviceGuard omitted
  return at::native::outer(self, vec2);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_outer_out(const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::outer_out(self, vec2, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__ger(const at::Tensor & self, const at::Tensor & vec2) {
    // No device check
  // DeviceGuard omitted
  return at::native::ger(self, vec2);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_ger_out(const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::ger_out(self, vec2, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_norm(const at::Tensor & self, const c10::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_norm(self, ord, dim, keepdim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_norm_out(const at::Tensor & self, const c10::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_norm_out(self, ord, dim, keepdim, dtype, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_ord_str_linalg_norm(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_norm(self, ord, dim, keepdim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_ord_str_out_linalg_norm_out(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_norm_out(self, ord, dim, keepdim, dtype, out);
}
} // anonymous namespace
// Generated glue (torchgen): matrix-norm / SVD / condition-number wrappers
// for CompositeImplicitAutograd — linalg_matrix_norm(.out) in scalar-`ord`
// and string-`ord` ("str_ord") overloads, linalg_svd with its "U"-tagged
// .out overload (named outs U/S/Vh), linalg_svdvals(.out), and
// linalg_cond(.out) in scalar-`p` and string-`p` ("p_str") overloads.
// Each forwards its arguments verbatim to at::native.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_matrix_norm(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_matrix_norm(self, ord, dim, keepdim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_matrix_norm_out(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_matrix_norm_out(self, ord, dim, keepdim, dtype, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_str_ord_linalg_matrix_norm(const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_matrix_norm(self, ord, dim, keepdim, dtype);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_str_ord_out_linalg_matrix_norm_out(const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_matrix_norm_out(self, ord, dim, keepdim, dtype, out);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__linalg_svd(const at::Tensor & A, bool full_matrices, c10::optional<c10::string_view> driver) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_svd(A, full_matrices, driver);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_U_linalg_svd_out(const at::Tensor & A, bool full_matrices, c10::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_svd_out(A, full_matrices, driver, U, S, Vh);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_svdvals(const at::Tensor & A, c10::optional<c10::string_view> driver) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_svdvals(A, driver);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_svdvals_out(const at::Tensor & A, c10::optional<c10::string_view> driver, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_svdvals_out(A, driver, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_cond(const at::Tensor & self, const c10::optional<at::Scalar> & p) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_cond(self, p);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_cond_out(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_cond_out(self, p, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_p_str_linalg_cond(const at::Tensor & self, c10::string_view p) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_cond(self, p);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_p_str_out_linalg_cond_out(const at::Tensor & self, c10::string_view p, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::linalg_cond_out(self, p, out);
}
} // anonymous namespace
// Generated wrappers for the six linalg_pinv overloads (atol/rtol as floats,
// rcond as double, rcond as Tensor — each with a functional and an out= form).
// Each forwards verbatim to the matching at::native:: kernel.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_atol_rtol_float_linalg_pinv(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian) {
    // No device check
    // DeviceGuard omitted
    return at::native::linalg_pinv(self, atol, rtol, hermitian);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_atol_rtol_float_out_linalg_pinv_out(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian, at::Tensor & out) {
    // No device check
    // DeviceGuard omitted
    return at::native::linalg_pinv_out(self, atol, rtol, hermitian, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_pinv(const at::Tensor & self, double rcond, bool hermitian) {
    // No device check
    // DeviceGuard omitted
    return at::native::linalg_pinv(self, rcond, hermitian);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_pinv_out(const at::Tensor & self, double rcond, bool hermitian, at::Tensor & out) {
    // No device check
    // DeviceGuard omitted
    return at::native::linalg_pinv_out(self, rcond, hermitian, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_rcond_tensor_linalg_pinv(const at::Tensor & self, const at::Tensor & rcond, bool hermitian) {
    // No device check
    // DeviceGuard omitted
    return at::native::linalg_pinv(self, rcond, hermitian);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_rcond_tensor_linalg_pinv_out(const at::Tensor & self, const at::Tensor & rcond, bool hermitian, at::Tensor & out) {
    // No device check
    // DeviceGuard omitted
    return at::native::linalg_pinv_out(self, rcond, hermitian, out);
}
} // anonymous namespace
// Generated wrappers for linalg_solve_ex / linalg_solve (functional and out=
// variants); pure pass-throughs to the at::native:: kernels.
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd__linalg_solve_ex(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
    // No device check
    // DeviceGuard omitted
    return at::native::linalg_solve_ex(A, B, left, check_errors);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor &,at::Tensor &> wrapper_CompositeImplicitAutograd_out_linalg_solve_ex_out(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & info) {
    // No device check
    // DeviceGuard omitted
    return at::native::linalg_solve_ex_out(A, B, left, check_errors, result, info);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_solve(const at::Tensor & A, const at::Tensor & B, bool left) {
    // No device check
    // DeviceGuard omitted
    return at::native::linalg_solve(A, B, left);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_solve_out(const at::Tensor & A, const at::Tensor & B, bool left, at::Tensor & out) {
    // No device check
    // DeviceGuard omitted
    return at::native::linalg_solve_out(A, B, left, out);
}
} // anonymous namespace
// Generated wrappers for linalg_tensorinv / linalg_tensorsolve (functional and
// out= variants); pure pass-throughs to the at::native:: kernels.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_tensorinv(const at::Tensor & self, int64_t ind) {
    // No device check
    // DeviceGuard omitted
    return at::native::linalg_tensorinv(self, ind);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_tensorinv_out(const at::Tensor & self, int64_t ind, at::Tensor & out) {
    // No device check
    // DeviceGuard omitted
    return at::native::linalg_tensorinv_out(self, ind, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_tensorsolve(const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims) {
    // No device check
    // DeviceGuard omitted
    return at::native::linalg_tensorsolve(self, other, dims);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_tensorsolve_out(const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims, at::Tensor & out) {
    // No device check
    // DeviceGuard omitted
    return at::native::linalg_tensorsolve_out(self, other, dims, out);
}
} // anonymous namespace
// Generated wrappers for linalg_matrix_power (functional and out= variants);
// pure pass-throughs to the at::native:: kernels.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_matrix_power(const at::Tensor & self, int64_t n) {
    // No device check
    // DeviceGuard omitted
    return at::native::linalg_matrix_power(self, n);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_matrix_power_out(const at::Tensor & self, int64_t n, at::Tensor & out) {
    // No device check
    // DeviceGuard omitted
    return at::native::linalg_matrix_power_out(self, n, out);
}
} // anonymous namespace
// Generated wrappers for the eight linalg_matrix_rank overloads (atol/rtol as
// Tensors or floats, tol as double or Tensor — each with a functional and an
// out= form); pure pass-throughs to the at::native:: kernels.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_atol_rtol_tensor_linalg_matrix_rank(const at::Tensor & input, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian) {
    // No device check
    // DeviceGuard omitted
    return at::native::linalg_matrix_rank(input, atol, rtol, hermitian);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_atol_rtol_tensor_out_linalg_matrix_rank_out(const at::Tensor & input, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out) {
    // No device check
    // DeviceGuard omitted
    return at::native::linalg_matrix_rank_out(input, atol, rtol, hermitian, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_atol_rtol_float_linalg_matrix_rank(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian) {
    // No device check
    // DeviceGuard omitted
    return at::native::linalg_matrix_rank(self, atol, rtol, hermitian);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_atol_rtol_float_out_linalg_matrix_rank_out(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian, at::Tensor & out) {
    // No device check
    // DeviceGuard omitted
    return at::native::linalg_matrix_rank_out(self, atol, rtol, hermitian, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_matrix_rank(const at::Tensor & self, double tol, bool hermitian) {
    // No device check
    // DeviceGuard omitted
    return at::native::linalg_matrix_rank(self, tol, hermitian);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_matrix_rank_out(const at::Tensor & self, double tol, bool hermitian, at::Tensor & out) {
    // No device check
    // DeviceGuard omitted
    return at::native::linalg_matrix_rank_out(self, tol, hermitian, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_tol_tensor_linalg_matrix_rank(const at::Tensor & input, const at::Tensor & tol, bool hermitian) {
    // No device check
    // DeviceGuard omitted
    return at::native::linalg_matrix_rank(input, tol, hermitian);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_tol_tensor_linalg_matrix_rank_out(const at::Tensor & input, const at::Tensor & tol, bool hermitian, at::Tensor & out) {
    // No device check
    // DeviceGuard omitted
    return at::native::linalg_matrix_rank_out(input, tol, hermitian, out);
}
} // anonymous namespace
// Generated wrappers for linalg_multi_dot (functional and out= variants);
// pure pass-throughs to the at::native:: kernels.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__linalg_multi_dot(at::TensorList tensors) {
    // No device check
    // DeviceGuard omitted
    return at::native::linalg_multi_dot(tensors);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_out_linalg_multi_dot_out(at::TensorList tensors, at::Tensor & out) {
    // No device check
    // DeviceGuard omitted
    return at::native::linalg_multi_dot_out(tensors, out);
}
} // anonymous namespace
// Generated wrapper for nested_to_padded_tensor; pure pass-through to the
// at::native:: kernel.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__nested_to_padded_tensor(const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size) {
    // No device check
    // DeviceGuard omitted
    return at::native::nested_to_padded_tensor(self, padding, output_size);
}
} // anonymous namespace
// Generated wrappers for the internal _test_* operators (used by PyTorch's own
// test suite to exercise codegen paths: serialization, string defaults,
// ambiguous overload resolution, multi-dispatch autograd). Pure pass-throughs.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___test_serialization_subcmul(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    // No device check
    // DeviceGuard omitted
    return at::native::_test_serialization_subcmul(self, other, alpha);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd___test_string_default(const at::Tensor & dummy, c10::string_view a, c10::string_view b) {
    // No device check
    // DeviceGuard omitted
    return at::native::_test_string_default(dummy, a, b);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_a__test_ambiguous_defaults(const at::Tensor & dummy, int64_t a, int64_t b) {
    // No device check
    // DeviceGuard omitted
    return at::native::_test_ambiguous_defaults(dummy, a, b);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_b__test_ambiguous_defaults(const at::Tensor & dummy, int64_t a, c10::string_view b) {
    // No device check
    // DeviceGuard omitted
    return at::native::_test_ambiguous_defaults(dummy, a, b);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_ntonly__test_autograd_multiple_dispatch(const at::Tensor & self, bool b) {
    // No device check
    // DeviceGuard omitted
    // note: the wrapper suffix "ntonly" maps to the distinctly-named native
    // kernel _test_autograd_multiple_dispatch_ntonly
    return at::native::_test_autograd_multiple_dispatch_ntonly(self, b);
}
} // anonymous namespace
// Generated wrappers for pad_sequence and (un)flatten_dense_tensors; pure
// pass-throughs to the at::native:: kernels.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__pad_sequence(at::TensorList sequences, bool batch_first, double padding_value) {
    // No device check
    // DeviceGuard omitted
    return at::native::pad_sequence(sequences, batch_first, padding_value);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__flatten_dense_tensors(at::TensorList tensors) {
    // No device check
    // DeviceGuard omitted
    return at::native::flatten_dense_tensors(tensors);
}
} // anonymous namespace
namespace {
::std::vector<at::Tensor> wrapper_CompositeImplicitAutograd__unflatten_dense_tensors(const at::Tensor & flat, at::TensorList tensors) {
    // No device check
    // DeviceGuard omitted
    return at::native::unflatten_dense_tensors(flat, tensors);
}
} // anonymous namespace
// Generated wrappers for the scaled_dot_product_attention family (public op,
// private _scaled variant, and the math reference implementation); pure
// pass-throughs to the at::native:: kernels.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd__scaled_dot_product_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal) {
    // No device check
    // DeviceGuard omitted
    return at::native::scaled_dot_product_attention(query, key, value, attn_mask, dropout_p, is_causal);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd___scaled_dot_product_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool need_attn_weights, bool is_causal) {
    // No device check
    // DeviceGuard omitted
    return at::native::_scaled_dot_product_attention(query, key, value, attn_mask, dropout_p, need_attn_weights, is_causal);
}
} // anonymous namespace
namespace {
::std::tuple<at::Tensor,at::Tensor> wrapper_CompositeImplicitAutograd___scaled_dot_product_attention_math(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal, const c10::optional<at::Tensor> & dropout_mask) {
    // No device check
    // DeviceGuard omitted
    return at::native::_scaled_dot_product_attention_math(query, key, value, attn_mask, dropout_p, is_causal, dropout_mask);
}
} // anonymous namespace
// Generated wrappers for the torch.special orthogonal-polynomial Scalar
// overloads. For each polynomial family (Chebyshev T/U/V/W, (probabilists')
// Hermite H/He, Laguerre L, Legendre P, shifted Chebyshev T/U/V/W) there are
// three wrappers: x as Scalar (functional), x as Scalar (out=), and n as
// Scalar (functional). All are pure pass-throughs to at::native:: kernels.
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_x_scalar_special_chebyshev_polynomial_t(const at::Scalar & x, const at::Tensor & n) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_chebyshev_polynomial_t(x, n);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_t_out(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_chebyshev_polynomial_t_out(x, n, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_n_scalar_special_chebyshev_polynomial_t(const at::Tensor & x, const at::Scalar & n) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_chebyshev_polynomial_t(x, n);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_x_scalar_special_chebyshev_polynomial_u(const at::Scalar & x, const at::Tensor & n) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_chebyshev_polynomial_u(x, n);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_u_out(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_chebyshev_polynomial_u_out(x, n, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_n_scalar_special_chebyshev_polynomial_u(const at::Tensor & x, const at::Scalar & n) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_chebyshev_polynomial_u(x, n);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_x_scalar_special_chebyshev_polynomial_v(const at::Scalar & x, const at::Tensor & n) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_chebyshev_polynomial_v(x, n);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_v_out(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_chebyshev_polynomial_v_out(x, n, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_n_scalar_special_chebyshev_polynomial_v(const at::Tensor & x, const at::Scalar & n) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_chebyshev_polynomial_v(x, n);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_x_scalar_special_chebyshev_polynomial_w(const at::Scalar & x, const at::Tensor & n) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_chebyshev_polynomial_w(x, n);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_w_out(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_chebyshev_polynomial_w_out(x, n, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_n_scalar_special_chebyshev_polynomial_w(const at::Tensor & x, const at::Scalar & n) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_chebyshev_polynomial_w(x, n);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_x_scalar_special_hermite_polynomial_h(const at::Scalar & x, const at::Tensor & n) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_hermite_polynomial_h(x, n);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_x_scalar_out_special_hermite_polynomial_h_out(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_hermite_polynomial_h_out(x, n, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_n_scalar_special_hermite_polynomial_h(const at::Tensor & x, const at::Scalar & n) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_hermite_polynomial_h(x, n);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_x_scalar_special_hermite_polynomial_he(const at::Scalar & x, const at::Tensor & n) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_hermite_polynomial_he(x, n);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_x_scalar_out_special_hermite_polynomial_he_out(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_hermite_polynomial_he_out(x, n, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_n_scalar_special_hermite_polynomial_he(const at::Tensor & x, const at::Scalar & n) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_hermite_polynomial_he(x, n);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_x_scalar_special_laguerre_polynomial_l(const at::Scalar & x, const at::Tensor & n) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_laguerre_polynomial_l(x, n);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_x_scalar_out_special_laguerre_polynomial_l_out(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_laguerre_polynomial_l_out(x, n, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_n_scalar_special_laguerre_polynomial_l(const at::Tensor & x, const at::Scalar & n) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_laguerre_polynomial_l(x, n);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_x_scalar_special_legendre_polynomial_p(const at::Scalar & x, const at::Tensor & n) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_legendre_polynomial_p(x, n);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_x_scalar_out_special_legendre_polynomial_p_out(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_legendre_polynomial_p_out(x, n, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_n_scalar_special_legendre_polynomial_p(const at::Tensor & x, const at::Scalar & n) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_legendre_polynomial_p(x, n);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_x_scalar_special_shifted_chebyshev_polynomial_t(const at::Scalar & x, const at::Tensor & n) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_shifted_chebyshev_polynomial_t(x, n);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_t_out(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_shifted_chebyshev_polynomial_t_out(x, n, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_n_scalar_special_shifted_chebyshev_polynomial_t(const at::Tensor & x, const at::Scalar & n) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_shifted_chebyshev_polynomial_t(x, n);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_x_scalar_special_shifted_chebyshev_polynomial_u(const at::Scalar & x, const at::Tensor & n) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_shifted_chebyshev_polynomial_u(x, n);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_u_out(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_shifted_chebyshev_polynomial_u_out(x, n, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_n_scalar_special_shifted_chebyshev_polynomial_u(const at::Tensor & x, const at::Scalar & n) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_shifted_chebyshev_polynomial_u(x, n);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_x_scalar_special_shifted_chebyshev_polynomial_v(const at::Scalar & x, const at::Tensor & n) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_shifted_chebyshev_polynomial_v(x, n);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_v_out(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_shifted_chebyshev_polynomial_v_out(x, n, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_n_scalar_special_shifted_chebyshev_polynomial_v(const at::Tensor & x, const at::Scalar & n) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_shifted_chebyshev_polynomial_v(x, n);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_x_scalar_special_shifted_chebyshev_polynomial_w(const at::Scalar & x, const at::Tensor & n) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_shifted_chebyshev_polynomial_w(x, n);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_w_out(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_shifted_chebyshev_polynomial_w_out(x, n, out);
}
} // anonymous namespace
namespace {
at::Tensor wrapper_CompositeImplicitAutograd_n_scalar_special_shifted_chebyshev_polynomial_w(const at::Tensor & x, const at::Scalar & n) {
    // No device check
    // DeviceGuard omitted
    return at::native::special_shifted_chebyshev_polynomial_w(x, n);
}
} // anonymous namespace
7147 | TORCH_LIBRARY_IMPL(aten, CompositeImplicitAutograd, m) { |
7148 | m.impl("_cast_Byte" , |
7149 | TORCH_FN(wrapper_CompositeImplicitAutograd___cast_Byte)); |
7150 | m.impl("_cast_Char" , |
7151 | TORCH_FN(wrapper_CompositeImplicitAutograd___cast_Char)); |
7152 | m.impl("_cast_Double" , |
7153 | TORCH_FN(wrapper_CompositeImplicitAutograd___cast_Double)); |
7154 | m.impl("_cast_Float" , |
7155 | TORCH_FN(wrapper_CompositeImplicitAutograd___cast_Float)); |
7156 | m.impl("_cast_Int" , |
7157 | TORCH_FN(wrapper_CompositeImplicitAutograd___cast_Int)); |
7158 | m.impl("_cast_Long" , |
7159 | TORCH_FN(wrapper_CompositeImplicitAutograd___cast_Long)); |
7160 | m.impl("_cast_Short" , |
7161 | TORCH_FN(wrapper_CompositeImplicitAutograd___cast_Short)); |
7162 | m.impl("_cast_Half" , |
7163 | TORCH_FN(wrapper_CompositeImplicitAutograd___cast_Half)); |
7164 | m.impl("_backward" , |
7165 | TORCH_FN(wrapper_CompositeImplicitAutograd___backward)); |
7166 | m.impl("set_data" , |
7167 | TORCH_FN(wrapper_CompositeImplicitAutograd__set_data)); |
7168 | m.impl("data" , |
7169 | TORCH_FN(wrapper_CompositeImplicitAutograd__data)); |
7170 | m.impl("is_leaf" , |
7171 | TORCH_FN(wrapper_CompositeImplicitAutograd__is_leaf)); |
7172 | m.impl("output_nr" , |
7173 | TORCH_FN(wrapper_CompositeImplicitAutograd__output_nr)); |
7174 | m.impl("_version" , |
7175 | TORCH_FN(wrapper_CompositeImplicitAutograd___version)); |
7176 | m.impl("requires_grad_" , |
7177 | TORCH_FN(wrapper_CompositeImplicitAutograd__requires_grad_)); |
7178 | m.impl("retain_grad" , |
7179 | TORCH_FN(wrapper_CompositeImplicitAutograd__retain_grad)); |
7180 | m.impl("retains_grad" , |
7181 | TORCH_FN(wrapper_CompositeImplicitAutograd__retains_grad)); |
7182 | m.impl("_unpack_dual" , |
7183 | TORCH_FN(wrapper_CompositeImplicitAutograd___unpack_dual)); |
7184 | m.impl("rename_" , |
7185 | TORCH_FN(wrapper_CompositeImplicitAutograd__rename_)); |
7186 | m.impl("rename" , |
7187 | TORCH_FN(wrapper_CompositeImplicitAutograd__rename)); |
7188 | m.impl("align_to" , |
7189 | TORCH_FN(wrapper_CompositeImplicitAutograd__align_to)); |
7190 | m.impl("align_to.ellipsis_idx" , |
7191 | TORCH_FN(wrapper_CompositeImplicitAutograd_ellipsis_idx_align_to)); |
7192 | m.impl("align_as" , |
7193 | TORCH_FN(wrapper_CompositeImplicitAutograd__align_as)); |
7194 | m.impl("align_tensors" , |
7195 | TORCH_FN(wrapper_CompositeImplicitAutograd__align_tensors)); |
7196 | m.impl("_assert_tensor_metadata" , |
7197 | TORCH_FN(wrapper_CompositeImplicitAutograd___assert_tensor_metadata)); |
7198 | m.impl("refine_names" , |
7199 | TORCH_FN(wrapper_CompositeImplicitAutograd__refine_names)); |
7200 | m.impl("_use_cudnn_rnn_flatten_weight" , |
7201 | TORCH_FN(wrapper_CompositeImplicitAutograd___use_cudnn_rnn_flatten_weight)); |
7202 | m.impl("_debug_has_internal_overlap" , |
7203 | TORCH_FN(wrapper_CompositeImplicitAutograd___debug_has_internal_overlap)); |
7204 | m.impl("_sobol_engine_draw" , |
7205 | TORCH_FN(wrapper_CompositeImplicitAutograd___sobol_engine_draw)); |
7206 | m.impl("_sobol_engine_ff_" , |
7207 | TORCH_FN(wrapper_CompositeImplicitAutograd___sobol_engine_ff_)); |
7208 | m.impl("_sobol_engine_scramble_" , |
7209 | TORCH_FN(wrapper_CompositeImplicitAutograd___sobol_engine_scramble_)); |
7210 | m.impl("_sobol_engine_initialize_state_" , |
7211 | TORCH_FN(wrapper_CompositeImplicitAutograd___sobol_engine_initialize_state_)); |
7212 | m.impl("_reshape_from_tensor" , |
7213 | TORCH_FN(wrapper_CompositeImplicitAutograd___reshape_from_tensor)); |
7214 | m.impl("_shape_as_tensor" , |
7215 | TORCH_FN(wrapper_CompositeImplicitAutograd___shape_as_tensor)); |
7216 | m.impl("dropout" , |
7217 | TORCH_FN(wrapper_CompositeImplicitAutograd__dropout)); |
7218 | m.impl("dropout_" , |
7219 | TORCH_FN(wrapper_CompositeImplicitAutograd__dropout_)); |
7220 | m.impl("feature_dropout" , |
7221 | TORCH_FN(wrapper_CompositeImplicitAutograd__feature_dropout)); |
7222 | m.impl("feature_dropout_" , |
7223 | TORCH_FN(wrapper_CompositeImplicitAutograd__feature_dropout_)); |
7224 | m.impl("alpha_dropout" , |
7225 | TORCH_FN(wrapper_CompositeImplicitAutograd__alpha_dropout)); |
7226 | m.impl("alpha_dropout_" , |
7227 | TORCH_FN(wrapper_CompositeImplicitAutograd__alpha_dropout_)); |
7228 | m.impl("feature_alpha_dropout" , |
7229 | TORCH_FN(wrapper_CompositeImplicitAutograd__feature_alpha_dropout)); |
7230 | m.impl("feature_alpha_dropout_" , |
7231 | TORCH_FN(wrapper_CompositeImplicitAutograd__feature_alpha_dropout_)); |
7232 | m.impl("absolute" , |
7233 | TORCH_FN(wrapper_CompositeImplicitAutograd__absolute)); |
7234 | m.impl("absolute.out" , |
7235 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_absolute_out)); |
7236 | m.impl("absolute_" , |
7237 | TORCH_FN(wrapper_CompositeImplicitAutograd__absolute_)); |
7238 | m.impl("chalf" , |
7239 | TORCH_FN(wrapper_CompositeImplicitAutograd__chalf)); |
7240 | m.impl("real" , |
7241 | TORCH_FN(wrapper_CompositeImplicitAutograd__real)); |
7242 | m.impl("imag" , |
7243 | TORCH_FN(wrapper_CompositeImplicitAutograd__imag)); |
7244 | m.impl("conj" , |
7245 | TORCH_FN(wrapper_CompositeImplicitAutograd__conj)); |
7246 | m.impl("conj_physical" , |
7247 | TORCH_FN(wrapper_CompositeImplicitAutograd__conj_physical)); |
7248 | m.impl("resolve_conj" , |
7249 | TORCH_FN(wrapper_CompositeImplicitAutograd__resolve_conj)); |
7250 | m.impl("resolve_neg" , |
7251 | TORCH_FN(wrapper_CompositeImplicitAutograd__resolve_neg)); |
7252 | m.impl("arccos" , |
7253 | TORCH_FN(wrapper_CompositeImplicitAutograd__arccos)); |
7254 | m.impl("arccos.out" , |
7255 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_arccos_out)); |
7256 | m.impl("arccos_" , |
7257 | TORCH_FN(wrapper_CompositeImplicitAutograd__arccos_)); |
7258 | m.impl("avg_pool1d" , |
7259 | TORCH_FN(wrapper_CompositeImplicitAutograd__avg_pool1d)); |
7260 | m.impl("adaptive_avg_pool1d" , |
7261 | TORCH_FN(wrapper_CompositeImplicitAutograd__adaptive_avg_pool1d)); |
7262 | m.impl("adaptive_max_pool1d" , |
7263 | TORCH_FN(wrapper_CompositeImplicitAutograd__adaptive_max_pool1d)); |
7264 | m.impl("affine_grid_generator_backward" , |
7265 | TORCH_FN(wrapper_CompositeImplicitAutograd__affine_grid_generator_backward)); |
7266 | m.impl("_test_check_tensor" , |
7267 | TORCH_FN(wrapper_CompositeImplicitAutograd___test_check_tensor)); |
7268 | m.impl("all.dimname" , |
7269 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_all)); |
7270 | m.impl("all.dimname_out" , |
7271 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_out_all_out)); |
7272 | m.impl("any.dimname" , |
7273 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_any)); |
7274 | m.impl("any.dimname_out" , |
7275 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_out_any_out)); |
7276 | m.impl("_dim_arange" , |
7277 | TORCH_FN(wrapper_CompositeImplicitAutograd___dim_arange)); |
7278 | m.impl("arccosh" , |
7279 | TORCH_FN(wrapper_CompositeImplicitAutograd__arccosh)); |
7280 | m.impl("arccosh.out" , |
7281 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_arccosh_out)); |
7282 | m.impl("arccosh_" , |
7283 | TORCH_FN(wrapper_CompositeImplicitAutograd__arccosh_)); |
7284 | m.impl("arcsinh" , |
7285 | TORCH_FN(wrapper_CompositeImplicitAutograd__arcsinh)); |
7286 | m.impl("arcsinh.out" , |
7287 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_arcsinh_out)); |
7288 | m.impl("arcsinh_" , |
7289 | TORCH_FN(wrapper_CompositeImplicitAutograd__arcsinh_)); |
7290 | m.impl("arctanh" , |
7291 | TORCH_FN(wrapper_CompositeImplicitAutograd__arctanh)); |
7292 | m.impl("arctanh.out" , |
7293 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_arctanh_out)); |
7294 | m.impl("arctanh_" , |
7295 | TORCH_FN(wrapper_CompositeImplicitAutograd__arctanh_)); |
7296 | m.impl("arcsin" , |
7297 | TORCH_FN(wrapper_CompositeImplicitAutograd__arcsin)); |
7298 | m.impl("arcsin.out" , |
7299 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_arcsin_out)); |
7300 | m.impl("arcsin_" , |
7301 | TORCH_FN(wrapper_CompositeImplicitAutograd__arcsin_)); |
7302 | m.impl("arctan" , |
7303 | TORCH_FN(wrapper_CompositeImplicitAutograd__arctan)); |
7304 | m.impl("arctan.out" , |
7305 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_arctan_out)); |
7306 | m.impl("arctan_" , |
7307 | TORCH_FN(wrapper_CompositeImplicitAutograd__arctan_)); |
7308 | m.impl("atleast_1d" , |
7309 | TORCH_FN(wrapper_CompositeImplicitAutograd__atleast_1d)); |
7310 | m.impl("atleast_1d.Sequence" , |
7311 | TORCH_FN(wrapper_CompositeImplicitAutograd_Sequence_atleast_1d)); |
7312 | m.impl("atleast_2d" , |
7313 | TORCH_FN(wrapper_CompositeImplicitAutograd__atleast_2d)); |
7314 | m.impl("atleast_2d.Sequence" , |
7315 | TORCH_FN(wrapper_CompositeImplicitAutograd_Sequence_atleast_2d)); |
7316 | m.impl("atleast_3d" , |
7317 | TORCH_FN(wrapper_CompositeImplicitAutograd__atleast_3d)); |
7318 | m.impl("atleast_3d.Sequence" , |
7319 | TORCH_FN(wrapper_CompositeImplicitAutograd_Sequence_atleast_3d)); |
7320 | m.impl("batch_norm" , |
7321 | TORCH_FN(wrapper_CompositeImplicitAutograd__batch_norm)); |
7322 | m.impl("_batch_norm_impl_index" , |
7323 | TORCH_FN(wrapper_CompositeImplicitAutograd___batch_norm_impl_index)); |
7324 | m.impl("_batch_norm_impl_index_backward" , |
7325 | TORCH_FN(wrapper_CompositeImplicitAutograd___batch_norm_impl_index_backward)); |
7326 | m.impl("bilinear" , |
7327 | TORCH_FN(wrapper_CompositeImplicitAutograd__bilinear)); |
7328 | m.impl("broadcast_tensors" , |
7329 | TORCH_FN(wrapper_CompositeImplicitAutograd__broadcast_tensors)); |
7330 | m.impl("broadcast_to" , |
7331 | TORCH_FN(wrapper_CompositeImplicitAutograd__broadcast_to)); |
7332 | m.impl("cat.names" , |
7333 | TORCH_FN(wrapper_CompositeImplicitAutograd_names_cat)); |
7334 | m.impl("cat.names_out" , |
7335 | TORCH_FN(wrapper_CompositeImplicitAutograd_names_out_cat_out)); |
7336 | m.impl("concat" , |
7337 | TORCH_FN(wrapper_CompositeImplicitAutograd__concat)); |
7338 | m.impl("concat.out" , |
7339 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_concat_out)); |
7340 | m.impl("concat.names" , |
7341 | TORCH_FN(wrapper_CompositeImplicitAutograd_names_concat)); |
7342 | m.impl("concat.names_out" , |
7343 | TORCH_FN(wrapper_CompositeImplicitAutograd_names_out_concat_out)); |
7344 | m.impl("concatenate" , |
7345 | TORCH_FN(wrapper_CompositeImplicitAutograd__concatenate)); |
7346 | m.impl("concatenate.out" , |
7347 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_concatenate_out)); |
7348 | m.impl("concatenate.names" , |
7349 | TORCH_FN(wrapper_CompositeImplicitAutograd_names_concatenate)); |
7350 | m.impl("concatenate.names_out" , |
7351 | TORCH_FN(wrapper_CompositeImplicitAutograd_names_out_concatenate_out)); |
7352 | m.impl("chain_matmul" , |
7353 | TORCH_FN(wrapper_CompositeImplicitAutograd__chain_matmul)); |
7354 | m.impl("chain_matmul.out" , |
7355 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_chain_matmul_out)); |
7356 | m.impl("unsafe_chunk" , |
7357 | TORCH_FN(wrapper_CompositeImplicitAutograd__unsafe_chunk)); |
7358 | m.impl("chunk" , |
7359 | TORCH_FN(wrapper_CompositeImplicitAutograd__chunk)); |
7360 | m.impl("tensor_split.sections" , |
7361 | TORCH_FN(wrapper_CompositeImplicitAutograd_sections_tensor_split)); |
7362 | m.impl("tensor_split.indices" , |
7363 | TORCH_FN(wrapper_CompositeImplicitAutograd_indices_tensor_split)); |
7364 | m.impl("tensor_split.tensor_indices_or_sections" , |
7365 | TORCH_FN(wrapper_CompositeImplicitAutograd_tensor_indices_or_sections_tensor_split)); |
7366 | m.impl("clip" , |
7367 | TORCH_FN(wrapper_CompositeImplicitAutograd__clip)); |
7368 | m.impl("clip.out" , |
7369 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_clip_out)); |
7370 | m.impl("clip_" , |
7371 | TORCH_FN(wrapper_CompositeImplicitAutograd__clip_)); |
7372 | m.impl("clip.Tensor" , |
7373 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_clip)); |
7374 | m.impl("clip.Tensor_out" , |
7375 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_out_clip_out)); |
7376 | m.impl("clip_.Tensor" , |
7377 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_clip_)); |
7378 | m.impl("cudnn_is_acceptable" , |
7379 | TORCH_FN(wrapper_CompositeImplicitAutograd__cudnn_is_acceptable)); |
7380 | m.impl("contiguous" , |
7381 | TORCH_FN(wrapper_CompositeImplicitAutograd__contiguous)); |
7382 | m.impl("_convolution.deprecated" , |
7383 | TORCH_FN(wrapper_CompositeImplicitAutograd_deprecated__convolution)); |
7384 | m.impl("_convolution_mode" , |
7385 | TORCH_FN(wrapper_CompositeImplicitAutograd___convolution_mode)); |
7386 | m.impl("_convolution_double_backward" , |
7387 | TORCH_FN(wrapper_CompositeImplicitAutograd___convolution_double_backward)); |
7388 | m.impl("conv1d" , |
7389 | TORCH_FN(wrapper_CompositeImplicitAutograd__conv1d)); |
7390 | m.impl("conv2d" , |
7391 | TORCH_FN(wrapper_CompositeImplicitAutograd__conv2d)); |
7392 | m.impl("conv3d" , |
7393 | TORCH_FN(wrapper_CompositeImplicitAutograd__conv3d)); |
7394 | m.impl("conv1d.padding" , |
7395 | TORCH_FN(wrapper_CompositeImplicitAutograd_padding_conv1d)); |
7396 | m.impl("conv2d.padding" , |
7397 | TORCH_FN(wrapper_CompositeImplicitAutograd_padding_conv2d)); |
7398 | m.impl("conv3d.padding" , |
7399 | TORCH_FN(wrapper_CompositeImplicitAutograd_padding_conv3d)); |
7400 | m.impl("conv_tbc_backward" , |
7401 | TORCH_FN(wrapper_CompositeImplicitAutograd__conv_tbc_backward)); |
7402 | m.impl("conv_transpose1d" , |
7403 | TORCH_FN(wrapper_CompositeImplicitAutograd__conv_transpose1d)); |
7404 | m.impl("conv_transpose2d.input" , |
7405 | TORCH_FN(wrapper_CompositeImplicitAutograd_input_conv_transpose2d)); |
7406 | m.impl("conv_transpose3d.input" , |
7407 | TORCH_FN(wrapper_CompositeImplicitAutograd_input_conv_transpose3d)); |
7408 | m.impl("cosine_embedding_loss" , |
7409 | TORCH_FN(wrapper_CompositeImplicitAutograd__cosine_embedding_loss)); |
7410 | m.impl("cov" , |
7411 | TORCH_FN(wrapper_CompositeImplicitAutograd__cov)); |
7412 | m.impl("corrcoef" , |
7413 | TORCH_FN(wrapper_CompositeImplicitAutograd__corrcoef)); |
7414 | m.impl("cummax.dimname" , |
7415 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_cummax)); |
7416 | m.impl("cummax.dimname_out" , |
7417 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_out_cummax_out)); |
7418 | m.impl("cummin.dimname" , |
7419 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_cummin)); |
7420 | m.impl("cummin.dimname_out" , |
7421 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_out_cummin_out)); |
7422 | m.impl("cummaxmin_backward" , |
7423 | TORCH_FN(wrapper_CompositeImplicitAutograd__cummaxmin_backward)); |
7424 | m.impl("cumprod.dimname" , |
7425 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_cumprod)); |
7426 | m.impl("cumprod.dimname_out" , |
7427 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_out_cumprod_out)); |
7428 | m.impl("cumprod_.dimname" , |
7429 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_cumprod_)); |
7430 | m.impl("cumprod_backward" , |
7431 | TORCH_FN(wrapper_CompositeImplicitAutograd__cumprod_backward)); |
7432 | m.impl("cumsum.dimname" , |
7433 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_cumsum)); |
7434 | m.impl("cumsum.dimname_out" , |
7435 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_out_cumsum_out)); |
7436 | m.impl("cumsum_.dimname" , |
7437 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_cumsum_)); |
7438 | m.impl("cumulative_trapezoid.x" , |
7439 | TORCH_FN(wrapper_CompositeImplicitAutograd_x_cumulative_trapezoid)); |
7440 | m.impl("cumulative_trapezoid.dx" , |
7441 | TORCH_FN(wrapper_CompositeImplicitAutograd_dx_cumulative_trapezoid)); |
7442 | m.impl("ctc_loss.IntList" , |
7443 | TORCH_FN(wrapper_CompositeImplicitAutograd_IntList_ctc_loss)); |
7444 | m.impl("ctc_loss.Tensor" , |
7445 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_ctc_loss)); |
7446 | m.impl("diagflat" , |
7447 | TORCH_FN(wrapper_CompositeImplicitAutograd__diagflat)); |
7448 | m.impl("linalg_diagonal" , |
7449 | TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_diagonal)); |
7450 | m.impl("diagonal.Dimname" , |
7451 | TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname_diagonal)); |
7452 | m.impl("fill_diagonal_" , |
7453 | TORCH_FN(wrapper_CompositeImplicitAutograd__fill_diagonal_)); |
7454 | m.impl("diff" , |
7455 | TORCH_FN(wrapper_CompositeImplicitAutograd__diff)); |
7456 | m.impl("diff.out" , |
7457 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_diff_out)); |
7458 | m.impl("gradient.scalarint" , |
7459 | TORCH_FN(wrapper_CompositeImplicitAutograd_scalarint_gradient)); |
7460 | m.impl("gradient.scalararray" , |
7461 | TORCH_FN(wrapper_CompositeImplicitAutograd_scalararray_gradient)); |
7462 | m.impl("gradient.array" , |
7463 | TORCH_FN(wrapper_CompositeImplicitAutograd_array_gradient)); |
7464 | m.impl("gradient.scalarrayint" , |
7465 | TORCH_FN(wrapper_CompositeImplicitAutograd_scalarrayint_gradient)); |
7466 | m.impl("gradient.scalarrayarray" , |
7467 | TORCH_FN(wrapper_CompositeImplicitAutograd_scalarrayarray_gradient)); |
7468 | m.impl("gradient.tensorarrayint" , |
7469 | TORCH_FN(wrapper_CompositeImplicitAutograd_tensorarrayint_gradient)); |
7470 | m.impl("gradient.tensorarray" , |
7471 | TORCH_FN(wrapper_CompositeImplicitAutograd_tensorarray_gradient)); |
7472 | m.impl("divide.Tensor" , |
7473 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_divide)); |
7474 | m.impl("divide.out" , |
7475 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_divide_out)); |
7476 | m.impl("divide_.Tensor" , |
7477 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_divide_)); |
7478 | m.impl("divide.Scalar" , |
7479 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_divide)); |
7480 | m.impl("divide_.Scalar" , |
7481 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_divide_)); |
7482 | m.impl("divide.Tensor_mode" , |
7483 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_mode_divide)); |
7484 | m.impl("divide.out_mode" , |
7485 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_mode_divide_out)); |
7486 | m.impl("divide_.Tensor_mode" , |
7487 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_mode_divide_)); |
7488 | m.impl("divide.Scalar_mode" , |
7489 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_mode_divide)); |
7490 | m.impl("divide_.Scalar_mode" , |
7491 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_mode_divide_)); |
7492 | m.impl("true_divide.Tensor" , |
7493 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_true_divide)); |
7494 | m.impl("true_divide.out" , |
7495 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_true_divide_out)); |
7496 | m.impl("true_divide_.Tensor" , |
7497 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_true_divide_)); |
7498 | m.impl("true_divide.Scalar" , |
7499 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_true_divide)); |
7500 | m.impl("true_divide_.Scalar" , |
7501 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_true_divide_)); |
7502 | m.impl("einsum" , |
7503 | TORCH_FN(wrapper_CompositeImplicitAutograd__einsum)); |
7504 | m.impl("embedding_backward" , |
7505 | TORCH_FN(wrapper_CompositeImplicitAutograd__embedding_backward)); |
7506 | m.impl("embedding_sparse_backward" , |
7507 | TORCH_FN(wrapper_CompositeImplicitAutograd__embedding_sparse_backward)); |
7508 | m.impl("_rowwise_prune" , |
7509 | TORCH_FN(wrapper_CompositeImplicitAutograd___rowwise_prune)); |
7510 | m.impl("row_stack" , |
7511 | TORCH_FN(wrapper_CompositeImplicitAutograd__row_stack)); |
7512 | m.impl("row_stack.out" , |
7513 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_row_stack_out)); |
7514 | m.impl("embedding_bag" , |
7515 | TORCH_FN(wrapper_CompositeImplicitAutograd__embedding_bag)); |
7516 | m.impl("embedding_bag.padding_idx" , |
7517 | TORCH_FN(wrapper_CompositeImplicitAutograd_padding_idx_embedding_bag)); |
7518 | m.impl("_embedding_bag_backward" , |
7519 | TORCH_FN(wrapper_CompositeImplicitAutograd___embedding_bag_backward)); |
7520 | m.impl("_embedding_bag_sparse_backward" , |
7521 | TORCH_FN(wrapper_CompositeImplicitAutograd___embedding_bag_sparse_backward)); |
7522 | m.impl("empty.out" , |
7523 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_empty_out)); |
7524 | m.impl("expand_as" , |
7525 | TORCH_FN(wrapper_CompositeImplicitAutograd__expand_as)); |
7526 | m.impl("flatten.using_ints" , |
7527 | TORCH_FN(wrapper_CompositeImplicitAutograd_using_ints_flatten)); |
7528 | m.impl("flatten.named_out_dim" , |
7529 | TORCH_FN(wrapper_CompositeImplicitAutograd_named_out_dim_flatten)); |
7530 | m.impl("flatten.using_names" , |
7531 | TORCH_FN(wrapper_CompositeImplicitAutograd_using_names_flatten)); |
7532 | m.impl("flatten.DimnameList" , |
7533 | TORCH_FN(wrapper_CompositeImplicitAutograd_DimnameList_flatten)); |
7534 | m.impl("unflatten.int" , |
7535 | TORCH_FN(wrapper_CompositeImplicitAutograd_int_unflatten)); |
7536 | m.impl("unflatten.Dimname" , |
7537 | TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname_unflatten)); |
7538 | m.impl("floor_divide.Scalar" , |
7539 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_floor_divide)); |
7540 | m.impl("floor_divide_.Scalar" , |
7541 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_floor_divide_)); |
7542 | m.impl("grid_sampler" , |
7543 | TORCH_FN(wrapper_CompositeImplicitAutograd__grid_sampler)); |
7544 | m.impl("_grid_sampler_2d_cpu_fallback_backward" , |
7545 | TORCH_FN(wrapper_CompositeImplicitAutograd___grid_sampler_2d_cpu_fallback_backward)); |
7546 | m.impl("hinge_embedding_loss" , |
7547 | TORCH_FN(wrapper_CompositeImplicitAutograd__hinge_embedding_loss)); |
7548 | m.impl("group_norm" , |
7549 | TORCH_FN(wrapper_CompositeImplicitAutograd__group_norm)); |
7550 | m.impl("_cufft_get_plan_cache_size" , |
7551 | TORCH_FN(wrapper_CompositeImplicitAutograd___cufft_get_plan_cache_size)); |
7552 | m.impl("_cufft_get_plan_cache_max_size" , |
7553 | TORCH_FN(wrapper_CompositeImplicitAutograd___cufft_get_plan_cache_max_size)); |
7554 | m.impl("_cufft_set_plan_cache_max_size" , |
7555 | TORCH_FN(wrapper_CompositeImplicitAutograd___cufft_set_plan_cache_max_size)); |
7556 | m.impl("_cufft_clear_plan_cache" , |
7557 | TORCH_FN(wrapper_CompositeImplicitAutograd___cufft_clear_plan_cache)); |
7558 | m.impl("index_copy_.dimname" , |
7559 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_index_copy_)); |
7560 | m.impl("index_copy.dimname" , |
7561 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_index_copy)); |
7562 | m.impl("instance_norm" , |
7563 | TORCH_FN(wrapper_CompositeImplicitAutograd__instance_norm)); |
7564 | m.impl("isclose" , |
7565 | TORCH_FN(wrapper_CompositeImplicitAutograd__isclose)); |
7566 | m.impl("is_distributed" , |
7567 | TORCH_FN(wrapper_CompositeImplicitAutograd__is_distributed)); |
7568 | m.impl("is_floating_point" , |
7569 | TORCH_FN(wrapper_CompositeImplicitAutograd__is_floating_point)); |
7570 | m.impl("is_complex" , |
7571 | TORCH_FN(wrapper_CompositeImplicitAutograd__is_complex)); |
7572 | m.impl("is_conj" , |
7573 | TORCH_FN(wrapper_CompositeImplicitAutograd__is_conj)); |
7574 | m.impl("_is_zerotensor" , |
7575 | TORCH_FN(wrapper_CompositeImplicitAutograd___is_zerotensor)); |
7576 | m.impl("is_neg" , |
7577 | TORCH_FN(wrapper_CompositeImplicitAutograd__is_neg)); |
7578 | m.impl("isreal" , |
7579 | TORCH_FN(wrapper_CompositeImplicitAutograd__isreal)); |
7580 | m.impl("is_nonzero" , |
7581 | TORCH_FN(wrapper_CompositeImplicitAutograd__is_nonzero)); |
7582 | m.impl("is_signed" , |
7583 | TORCH_FN(wrapper_CompositeImplicitAutograd__is_signed)); |
7584 | m.impl("is_inference" , |
7585 | TORCH_FN(wrapper_CompositeImplicitAutograd__is_inference)); |
7586 | m.impl("kl_div" , |
7587 | TORCH_FN(wrapper_CompositeImplicitAutograd__kl_div)); |
7588 | m.impl("kron" , |
7589 | TORCH_FN(wrapper_CompositeImplicitAutograd__kron)); |
7590 | m.impl("kron.out" , |
7591 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_kron_out)); |
7592 | m.impl("kthvalue.dimname" , |
7593 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_kthvalue)); |
7594 | m.impl("kthvalue.dimname_out" , |
7595 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_out_kthvalue_out)); |
7596 | m.impl("layer_norm" , |
7597 | TORCH_FN(wrapper_CompositeImplicitAutograd__layer_norm)); |
7598 | m.impl("linear" , |
7599 | TORCH_FN(wrapper_CompositeImplicitAutograd__linear)); |
7600 | m.impl("fbgemm_linear_int8_weight_fp32_activation" , |
7601 | TORCH_FN(wrapper_CompositeImplicitAutograd__fbgemm_linear_int8_weight_fp32_activation)); |
7602 | m.impl("fbgemm_linear_int8_weight" , |
7603 | TORCH_FN(wrapper_CompositeImplicitAutograd__fbgemm_linear_int8_weight)); |
7604 | m.impl("fbgemm_linear_quantize_weight" , |
7605 | TORCH_FN(wrapper_CompositeImplicitAutograd__fbgemm_linear_quantize_weight)); |
7606 | m.impl("fbgemm_pack_gemm_matrix_fp16" , |
7607 | TORCH_FN(wrapper_CompositeImplicitAutograd__fbgemm_pack_gemm_matrix_fp16)); |
7608 | m.impl("fbgemm_linear_fp16_weight_fp32_activation" , |
7609 | TORCH_FN(wrapper_CompositeImplicitAutograd__fbgemm_linear_fp16_weight_fp32_activation)); |
7610 | m.impl("fbgemm_linear_fp16_weight" , |
7611 | TORCH_FN(wrapper_CompositeImplicitAutograd__fbgemm_linear_fp16_weight)); |
7612 | m.impl("fbgemm_pack_quantized_matrix" , |
7613 | TORCH_FN(wrapper_CompositeImplicitAutograd__fbgemm_pack_quantized_matrix)); |
7614 | m.impl("fbgemm_pack_quantized_matrix.KN" , |
7615 | TORCH_FN(wrapper_CompositeImplicitAutograd_KN_fbgemm_pack_quantized_matrix)); |
7616 | m.impl("ldexp.Tensor" , |
7617 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_ldexp)); |
7618 | m.impl("ldexp.out" , |
7619 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_ldexp_out)); |
7620 | m.impl("ldexp_" , |
7621 | TORCH_FN(wrapper_CompositeImplicitAutograd__ldexp_)); |
7622 | m.impl("log_softmax.int" , |
7623 | TORCH_FN(wrapper_CompositeImplicitAutograd_int_log_softmax)); |
7624 | m.impl("log_softmax.Dimname" , |
7625 | TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname_log_softmax)); |
7626 | m.impl("logcumsumexp.dimname" , |
7627 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_logcumsumexp)); |
7628 | m.impl("logcumsumexp.dimname_out" , |
7629 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_out_logcumsumexp_out)); |
7630 | m.impl("logsumexp.names" , |
7631 | TORCH_FN(wrapper_CompositeImplicitAutograd_names_logsumexp)); |
7632 | m.impl("logsumexp.names_out" , |
7633 | TORCH_FN(wrapper_CompositeImplicitAutograd_names_out_logsumexp_out)); |
7634 | m.impl("margin_ranking_loss" , |
7635 | TORCH_FN(wrapper_CompositeImplicitAutograd__margin_ranking_loss)); |
7636 | m.impl("matmul" , |
7637 | TORCH_FN(wrapper_CompositeImplicitAutograd__matmul)); |
7638 | m.impl("matmul.out" , |
7639 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_matmul_out)); |
7640 | m.impl("matrix_power" , |
7641 | TORCH_FN(wrapper_CompositeImplicitAutograd__matrix_power)); |
7642 | m.impl("matrix_power.out" , |
7643 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_matrix_power_out)); |
7644 | m.impl("matrix_exp" , |
7645 | TORCH_FN(wrapper_CompositeImplicitAutograd__matrix_exp)); |
7646 | m.impl("matrix_exp_backward" , |
7647 | TORCH_FN(wrapper_CompositeImplicitAutograd__matrix_exp_backward)); |
7648 | m.impl("max.names_dim" , |
7649 | TORCH_FN(wrapper_CompositeImplicitAutograd_names_dim_max)); |
7650 | m.impl("max.names_dim_max" , |
7651 | TORCH_FN(wrapper_CompositeImplicitAutograd_names_dim_max_max_out)); |
7652 | m.impl("value_selecting_reduction_backward" , |
7653 | TORCH_FN(wrapper_CompositeImplicitAutograd__value_selecting_reduction_backward)); |
7654 | m.impl("max_pool1d_with_indices" , |
7655 | TORCH_FN(wrapper_CompositeImplicitAutograd__max_pool1d_with_indices)); |
7656 | m.impl("max_pool1d" , |
7657 | TORCH_FN(wrapper_CompositeImplicitAutograd__max_pool1d)); |
7658 | m.impl("max_pool2d" , |
7659 | TORCH_FN(wrapper_CompositeImplicitAutograd__max_pool2d)); |
7660 | m.impl("max_pool3d" , |
7661 | TORCH_FN(wrapper_CompositeImplicitAutograd__max_pool3d)); |
7662 | m.impl("mean.names_dim" , |
7663 | TORCH_FN(wrapper_CompositeImplicitAutograd_names_dim_mean)); |
7664 | m.impl("mean.names_out" , |
7665 | TORCH_FN(wrapper_CompositeImplicitAutograd_names_out_mean_out)); |
7666 | m.impl("nanmean" , |
7667 | TORCH_FN(wrapper_CompositeImplicitAutograd__nanmean)); |
7668 | m.impl("nanmean.out" , |
7669 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_nanmean_out)); |
7670 | m.impl("median.names_dim" , |
7671 | TORCH_FN(wrapper_CompositeImplicitAutograd_names_dim_median)); |
7672 | m.impl("median.names_dim_values" , |
7673 | TORCH_FN(wrapper_CompositeImplicitAutograd_names_dim_values_median_out)); |
7674 | m.impl("nanmedian.names_dim" , |
7675 | TORCH_FN(wrapper_CompositeImplicitAutograd_names_dim_nanmedian)); |
7676 | m.impl("nanmedian.names_dim_values" , |
7677 | TORCH_FN(wrapper_CompositeImplicitAutograd_names_dim_values_nanmedian_out)); |
7678 | m.impl("min.names_dim" , |
7679 | TORCH_FN(wrapper_CompositeImplicitAutograd_names_dim_min)); |
7680 | m.impl("min.names_dim_min" , |
7681 | TORCH_FN(wrapper_CompositeImplicitAutograd_names_dim_min_min_out)); |
7682 | m.impl("_sparse_mm" , |
7683 | TORCH_FN(wrapper_CompositeImplicitAutograd___sparse_mm)); |
7684 | m.impl("_sparse_mm.reduce" , |
7685 | TORCH_FN(wrapper_CompositeImplicitAutograd_reduce__sparse_mm)); |
7686 | m.impl("mode.dimname" , |
7687 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_mode)); |
7688 | m.impl("mode.dimname_out" , |
7689 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_out_mode_out)); |
7690 | m.impl("multiply.Tensor" , |
7691 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_multiply)); |
7692 | m.impl("multiply.out" , |
7693 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_multiply_out)); |
7694 | m.impl("multiply_.Tensor" , |
7695 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_multiply_)); |
7696 | m.impl("multiply.Scalar" , |
7697 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_multiply)); |
7698 | m.impl("multiply_.Scalar" , |
7699 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_multiply_)); |
7700 | m.impl("narrow" , |
7701 | TORCH_FN(wrapper_CompositeImplicitAutograd__narrow)); |
7702 | m.impl("narrow.Tensor" , |
7703 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_narrow)); |
7704 | m.impl("is_vulkan_available" , |
7705 | TORCH_FN(wrapper_CompositeImplicitAutograd__is_vulkan_available)); |
7706 | m.impl("_nnpack_available" , |
7707 | TORCH_FN(wrapper_CompositeImplicitAutograd___nnpack_available)); |
7708 | m.impl("pairwise_distance" , |
7709 | TORCH_FN(wrapper_CompositeImplicitAutograd__pairwise_distance)); |
7710 | m.impl("cdist" , |
7711 | TORCH_FN(wrapper_CompositeImplicitAutograd__cdist)); |
7712 | m.impl("pdist" , |
7713 | TORCH_FN(wrapper_CompositeImplicitAutograd__pdist)); |
7714 | m.impl("cosine_similarity" , |
7715 | TORCH_FN(wrapper_CompositeImplicitAutograd__cosine_similarity)); |
7716 | m.impl("movedim.intlist" , |
7717 | TORCH_FN(wrapper_CompositeImplicitAutograd_intlist_movedim)); |
7718 | m.impl("movedim.int" , |
7719 | TORCH_FN(wrapper_CompositeImplicitAutograd_int_movedim)); |
7720 | m.impl("moveaxis.intlist" , |
7721 | TORCH_FN(wrapper_CompositeImplicitAutograd_intlist_moveaxis)); |
7722 | m.impl("moveaxis.int" , |
7723 | TORCH_FN(wrapper_CompositeImplicitAutograd_int_moveaxis)); |
7724 | m.impl("numpy_T" , |
7725 | TORCH_FN(wrapper_CompositeImplicitAutograd__numpy_T)); |
7726 | m.impl("matrix_H" , |
7727 | TORCH_FN(wrapper_CompositeImplicitAutograd__matrix_H)); |
7728 | m.impl("mT" , |
7729 | TORCH_FN(wrapper_CompositeImplicitAutograd__mT)); |
7730 | m.impl("mH" , |
7731 | TORCH_FN(wrapper_CompositeImplicitAutograd__mH)); |
7732 | m.impl("adjoint" , |
7733 | TORCH_FN(wrapper_CompositeImplicitAutograd__adjoint)); |
7734 | m.impl("native_channel_shuffle" , |
7735 | TORCH_FN(wrapper_CompositeImplicitAutograd__native_channel_shuffle)); |
7736 | m.impl("pin_memory" , |
7737 | TORCH_FN(wrapper_CompositeImplicitAutograd__pin_memory)); |
7738 | m.impl("pinverse" , |
7739 | TORCH_FN(wrapper_CompositeImplicitAutograd__pinverse)); |
7740 | m.impl("poisson_nll_loss" , |
7741 | TORCH_FN(wrapper_CompositeImplicitAutograd__poisson_nll_loss)); |
7742 | m.impl("rand.generator_out" , |
7743 | TORCH_FN(wrapper_CompositeImplicitAutograd_generator_out_rand_out)); |
7744 | m.impl("randn.out" , |
7745 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_randn_out)); |
7746 | m.impl("randn.generator_out" , |
7747 | TORCH_FN(wrapper_CompositeImplicitAutograd_generator_out_randn_out)); |
7748 | m.impl("ravel" , |
7749 | TORCH_FN(wrapper_CompositeImplicitAutograd__ravel)); |
7750 | m.impl("negative" , |
7751 | TORCH_FN(wrapper_CompositeImplicitAutograd__negative)); |
7752 | m.impl("negative.out" , |
7753 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_negative_out)); |
7754 | m.impl("negative_" , |
7755 | TORCH_FN(wrapper_CompositeImplicitAutograd__negative_)); |
7756 | m.impl("repeat_interleave.self_Tensor" , |
7757 | TORCH_FN(wrapper_CompositeImplicitAutograd_self_Tensor_repeat_interleave)); |
7758 | m.impl("repeat_interleave.self_int" , |
7759 | TORCH_FN(wrapper_CompositeImplicitAutograd_self_int_repeat_interleave)); |
7760 | m.impl("reshape" , |
7761 | TORCH_FN(wrapper_CompositeImplicitAutograd__reshape)); |
7762 | m.impl("reshape_as" , |
7763 | TORCH_FN(wrapper_CompositeImplicitAutograd__reshape_as)); |
7764 | m.impl("rrelu" , |
7765 | TORCH_FN(wrapper_CompositeImplicitAutograd__rrelu)); |
7766 | m.impl("rrelu_" , |
7767 | TORCH_FN(wrapper_CompositeImplicitAutograd__rrelu_)); |
7768 | m.impl("relu6" , |
7769 | TORCH_FN(wrapper_CompositeImplicitAutograd__relu6)); |
7770 | m.impl("relu6_" , |
7771 | TORCH_FN(wrapper_CompositeImplicitAutograd__relu6_)); |
7772 | m.impl("prelu" , |
7773 | TORCH_FN(wrapper_CompositeImplicitAutograd__prelu)); |
7774 | m.impl("infinitely_differentiable_gelu_backward" , |
7775 | TORCH_FN(wrapper_CompositeImplicitAutograd__infinitely_differentiable_gelu_backward)); |
7776 | m.impl("select.Dimname" , |
7777 | TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname_select)); |
7778 | m.impl("selu" , |
7779 | TORCH_FN(wrapper_CompositeImplicitAutograd__selu)); |
7780 | m.impl("selu_" , |
7781 | TORCH_FN(wrapper_CompositeImplicitAutograd__selu_)); |
7782 | m.impl("silu_backward" , |
7783 | TORCH_FN(wrapper_CompositeImplicitAutograd__silu_backward)); |
7784 | m.impl("mish_backward" , |
7785 | TORCH_FN(wrapper_CompositeImplicitAutograd__mish_backward)); |
7786 | m.impl("size.int" , |
7787 | TORCH_FN(wrapper_CompositeImplicitAutograd_int_size)); |
7788 | m.impl("size.Dimname" , |
7789 | TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname_size)); |
7790 | m.impl("smm" , |
7791 | TORCH_FN(wrapper_CompositeImplicitAutograd__smm)); |
7792 | m.impl("softmax.int" , |
7793 | TORCH_FN(wrapper_CompositeImplicitAutograd_int_softmax)); |
7794 | m.impl("softmax.Dimname" , |
7795 | TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname_softmax)); |
7796 | m.impl("split.sizes" , |
7797 | TORCH_FN(wrapper_CompositeImplicitAutograd_sizes_split)); |
7798 | m.impl("hsplit.int" , |
7799 | TORCH_FN(wrapper_CompositeImplicitAutograd_int_hsplit)); |
7800 | m.impl("hsplit.array" , |
7801 | TORCH_FN(wrapper_CompositeImplicitAutograd_array_hsplit)); |
7802 | m.impl("vsplit.int" , |
7803 | TORCH_FN(wrapper_CompositeImplicitAutograd_int_vsplit)); |
7804 | m.impl("vsplit.array" , |
7805 | TORCH_FN(wrapper_CompositeImplicitAutograd_array_vsplit)); |
7806 | m.impl("dsplit.int" , |
7807 | TORCH_FN(wrapper_CompositeImplicitAutograd_int_dsplit)); |
7808 | m.impl("dsplit.array" , |
7809 | TORCH_FN(wrapper_CompositeImplicitAutograd_array_dsplit)); |
7810 | m.impl("squeeze.dimname" , |
7811 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_squeeze)); |
7812 | m.impl("squeeze_.dimname" , |
7813 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_squeeze_)); |
7814 | m.impl("sspaddmm" , |
7815 | TORCH_FN(wrapper_CompositeImplicitAutograd__sspaddmm)); |
7816 | m.impl("hstack" , |
7817 | TORCH_FN(wrapper_CompositeImplicitAutograd__hstack)); |
7818 | m.impl("hstack.out" , |
7819 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_hstack_out)); |
7820 | m.impl("vstack" , |
7821 | TORCH_FN(wrapper_CompositeImplicitAutograd__vstack)); |
7822 | m.impl("vstack.out" , |
7823 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_vstack_out)); |
7824 | m.impl("dstack" , |
7825 | TORCH_FN(wrapper_CompositeImplicitAutograd__dstack)); |
7826 | m.impl("dstack.out" , |
7827 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_dstack_out)); |
7828 | m.impl("stft" , |
7829 | TORCH_FN(wrapper_CompositeImplicitAutograd__stft)); |
7830 | m.impl("stft.center" , |
7831 | TORCH_FN(wrapper_CompositeImplicitAutograd_center_stft)); |
7832 | m.impl("istft" , |
7833 | TORCH_FN(wrapper_CompositeImplicitAutograd__istft)); |
7834 | m.impl("stride.int" , |
7835 | TORCH_FN(wrapper_CompositeImplicitAutograd_int_stride)); |
7836 | m.impl("stride.Dimname" , |
7837 | TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname_stride)); |
7838 | m.impl("sum.dim_DimnameList" , |
7839 | TORCH_FN(wrapper_CompositeImplicitAutograd_dim_DimnameList_sum)); |
7840 | m.impl("sum.DimnameList_out" , |
7841 | TORCH_FN(wrapper_CompositeImplicitAutograd_DimnameList_out_sum_out)); |
7842 | m.impl("sum_to_size" , |
7843 | TORCH_FN(wrapper_CompositeImplicitAutograd__sum_to_size)); |
7844 | m.impl("square" , |
7845 | TORCH_FN(wrapper_CompositeImplicitAutograd__square)); |
7846 | m.impl("square.out" , |
7847 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_square_out)); |
7848 | m.impl("square_" , |
7849 | TORCH_FN(wrapper_CompositeImplicitAutograd__square_)); |
7850 | m.impl("std" , |
7851 | TORCH_FN(wrapper_CompositeImplicitAutograd__std)); |
7852 | m.impl("std.dim" , |
7853 | TORCH_FN(wrapper_CompositeImplicitAutograd_dim_std)); |
7854 | m.impl("std.out" , |
7855 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_std_out)); |
7856 | m.impl("std_mean" , |
7857 | TORCH_FN(wrapper_CompositeImplicitAutograd__std_mean)); |
7858 | m.impl("std_mean.dim" , |
7859 | TORCH_FN(wrapper_CompositeImplicitAutograd_dim_std_mean)); |
7860 | m.impl("std_mean.names_dim" , |
7861 | TORCH_FN(wrapper_CompositeImplicitAutograd_names_dim_std_mean)); |
7862 | m.impl("std_mean.correction_names" , |
7863 | TORCH_FN(wrapper_CompositeImplicitAutograd_correction_names_std_mean)); |
7864 | m.impl("std.names_dim" , |
7865 | TORCH_FN(wrapper_CompositeImplicitAutograd_names_dim_std)); |
7866 | m.impl("std.names_out" , |
7867 | TORCH_FN(wrapper_CompositeImplicitAutograd_names_out_std_out)); |
7868 | m.impl("std.correction_names" , |
7869 | TORCH_FN(wrapper_CompositeImplicitAutograd_correction_names_std)); |
7870 | m.impl("std.correction_names_out" , |
7871 | TORCH_FN(wrapper_CompositeImplicitAutograd_correction_names_out_std_out)); |
7872 | m.impl("prod.dim_Dimname" , |
7873 | TORCH_FN(wrapper_CompositeImplicitAutograd_dim_Dimname_prod)); |
7874 | m.impl("prod.Dimname_out" , |
7875 | TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname_out_prod_out)); |
7876 | m.impl("tensordot" , |
7877 | TORCH_FN(wrapper_CompositeImplicitAutograd__tensordot)); |
7878 | m.impl("tile" , |
7879 | TORCH_FN(wrapper_CompositeImplicitAutograd__tile)); |
7880 | m.impl("transpose.Dimname" , |
7881 | TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname_transpose)); |
7882 | m.impl("one_hot" , |
7883 | TORCH_FN(wrapper_CompositeImplicitAutograd__one_hot)); |
7884 | m.impl("fliplr" , |
7885 | TORCH_FN(wrapper_CompositeImplicitAutograd__fliplr)); |
7886 | m.impl("flipud" , |
7887 | TORCH_FN(wrapper_CompositeImplicitAutograd__flipud)); |
7888 | m.impl("trapezoid.x" , |
7889 | TORCH_FN(wrapper_CompositeImplicitAutograd_x_trapezoid)); |
7890 | m.impl("trapezoid.dx" , |
7891 | TORCH_FN(wrapper_CompositeImplicitAutograd_dx_trapezoid)); |
7892 | m.impl("trapz.x" , |
7893 | TORCH_FN(wrapper_CompositeImplicitAutograd_x_trapz)); |
7894 | m.impl("trapz.dx" , |
7895 | TORCH_FN(wrapper_CompositeImplicitAutograd_dx_trapz)); |
7896 | m.impl("triplet_margin_loss" , |
7897 | TORCH_FN(wrapper_CompositeImplicitAutograd__triplet_margin_loss)); |
7898 | m.impl("fix" , |
7899 | TORCH_FN(wrapper_CompositeImplicitAutograd__fix)); |
7900 | m.impl("fix.out" , |
7901 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_fix_out)); |
7902 | m.impl("fix_" , |
7903 | TORCH_FN(wrapper_CompositeImplicitAutograd__fix_)); |
7904 | m.impl("type_as" , |
7905 | TORCH_FN(wrapper_CompositeImplicitAutograd__type_as)); |
7906 | m.impl("_has_compatible_shallow_copy_type" , |
7907 | TORCH_FN(wrapper_CompositeImplicitAutograd___has_compatible_shallow_copy_type)); |
7908 | m.impl("vander" , |
7909 | TORCH_FN(wrapper_CompositeImplicitAutograd__vander)); |
7910 | m.impl("var" , |
7911 | TORCH_FN(wrapper_CompositeImplicitAutograd__var)); |
7912 | m.impl("var.dim" , |
7913 | TORCH_FN(wrapper_CompositeImplicitAutograd_dim_var)); |
7914 | m.impl("var.out" , |
7915 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_var_out)); |
7916 | m.impl("var.names_dim" , |
7917 | TORCH_FN(wrapper_CompositeImplicitAutograd_names_dim_var)); |
7918 | m.impl("var.names_out" , |
7919 | TORCH_FN(wrapper_CompositeImplicitAutograd_names_out_var_out)); |
7920 | m.impl("var.correction_names" , |
7921 | TORCH_FN(wrapper_CompositeImplicitAutograd_correction_names_var)); |
7922 | m.impl("var.correction_names_out" , |
7923 | TORCH_FN(wrapper_CompositeImplicitAutograd_correction_names_out_var_out)); |
7924 | m.impl("var_mean" , |
7925 | TORCH_FN(wrapper_CompositeImplicitAutograd__var_mean)); |
7926 | m.impl("var_mean.dim" , |
7927 | TORCH_FN(wrapper_CompositeImplicitAutograd_dim_var_mean)); |
7928 | m.impl("var_mean.names_dim" , |
7929 | TORCH_FN(wrapper_CompositeImplicitAutograd_names_dim_var_mean)); |
7930 | m.impl("var_mean.correction_names" , |
7931 | TORCH_FN(wrapper_CompositeImplicitAutograd_correction_names_var_mean)); |
7932 | m.impl("view_as" , |
7933 | TORCH_FN(wrapper_CompositeImplicitAutograd__view_as)); |
7934 | m.impl("where.ScalarSelf" , |
7935 | TORCH_FN(wrapper_CompositeImplicitAutograd_ScalarSelf_where)); |
7936 | m.impl("where.ScalarOther" , |
7937 | TORCH_FN(wrapper_CompositeImplicitAutograd_ScalarOther_where)); |
7938 | m.impl("where.Scalar" , |
7939 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_where)); |
7940 | m.impl("where" , |
7941 | TORCH_FN(wrapper_CompositeImplicitAutograd__where)); |
7942 | m.impl("norm_except_dim" , |
7943 | TORCH_FN(wrapper_CompositeImplicitAutograd__norm_except_dim)); |
7944 | m.impl("_weight_norm" , |
7945 | TORCH_FN(wrapper_CompositeImplicitAutograd___weight_norm)); |
7946 | m.impl("_weight_norm_differentiable_backward" , |
7947 | TORCH_FN(wrapper_CompositeImplicitAutograd___weight_norm_differentiable_backward)); |
7948 | m.impl("_sparse_sum" , |
7949 | TORCH_FN(wrapper_CompositeImplicitAutograd___sparse_sum)); |
7950 | m.impl("_sparse_sum.dtype" , |
7951 | TORCH_FN(wrapper_CompositeImplicitAutograd_dtype__sparse_sum)); |
7952 | m.impl("_sparse_sum.dim_dtype" , |
7953 | TORCH_FN(wrapper_CompositeImplicitAutograd_dim_dtype__sparse_sum)); |
7954 | m.impl("_sparse_softmax.int" , |
7955 | TORCH_FN(wrapper_CompositeImplicitAutograd_int__sparse_softmax)); |
7956 | m.impl("_sparse_softmax.Dimname" , |
7957 | TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname__sparse_softmax)); |
7958 | m.impl("_sparse_log_softmax.int" , |
7959 | TORCH_FN(wrapper_CompositeImplicitAutograd_int__sparse_log_softmax)); |
7960 | m.impl("_sparse_log_softmax.Dimname" , |
7961 | TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname__sparse_log_softmax)); |
7962 | m.impl("norm.names_ScalarOpt_dim_dtype" , |
7963 | TORCH_FN(wrapper_CompositeImplicitAutograd_names_ScalarOpt_dim_dtype_norm)); |
7964 | m.impl("norm.names_dtype_out" , |
7965 | TORCH_FN(wrapper_CompositeImplicitAutograd_names_dtype_out_norm_out)); |
7966 | m.impl("norm.names_ScalarOpt_dim" , |
7967 | TORCH_FN(wrapper_CompositeImplicitAutograd_names_ScalarOpt_dim_norm)); |
7968 | m.impl("norm.names_out" , |
7969 | TORCH_FN(wrapper_CompositeImplicitAutograd_names_out_norm_out)); |
7970 | m.impl("frobenius_norm.dim" , |
7971 | TORCH_FN(wrapper_CompositeImplicitAutograd_dim_frobenius_norm)); |
7972 | m.impl("frobenius_norm.out" , |
7973 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_frobenius_norm_out)); |
7974 | m.impl("nuclear_norm" , |
7975 | TORCH_FN(wrapper_CompositeImplicitAutograd__nuclear_norm)); |
7976 | m.impl("nuclear_norm.out" , |
7977 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_nuclear_norm_out)); |
7978 | m.impl("nuclear_norm.dim" , |
7979 | TORCH_FN(wrapper_CompositeImplicitAutograd_dim_nuclear_norm)); |
7980 | m.impl("nuclear_norm.dim_out" , |
7981 | TORCH_FN(wrapper_CompositeImplicitAutograd_dim_out_nuclear_norm_out)); |
7982 | m.impl("positive" , |
7983 | TORCH_FN(wrapper_CompositeImplicitAutograd__positive)); |
7984 | m.impl("subtract.Tensor" , |
7985 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_subtract)); |
7986 | m.impl("subtract.out" , |
7987 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_subtract_out)); |
7988 | m.impl("subtract_.Tensor" , |
7989 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_subtract_)); |
7990 | m.impl("subtract.Scalar" , |
7991 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_subtract)); |
7992 | m.impl("subtract_.Scalar" , |
7993 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_subtract_)); |
7994 | m.impl("sparse_compressed_tensor.comp_plain_value_size" , |
7995 | TORCH_FN(wrapper_CompositeImplicitAutograd_comp_plain_value_size_sparse_compressed_tensor)); |
7996 | m.impl("sparse_csr_tensor.crow_col_value_size" , |
7997 | TORCH_FN(wrapper_CompositeImplicitAutograd_crow_col_value_size_sparse_csr_tensor)); |
7998 | m.impl("sparse_csc_tensor.ccol_row_value_size" , |
7999 | TORCH_FN(wrapper_CompositeImplicitAutograd_ccol_row_value_size_sparse_csc_tensor)); |
8000 | m.impl("sparse_bsr_tensor.crow_col_value_size" , |
8001 | TORCH_FN(wrapper_CompositeImplicitAutograd_crow_col_value_size_sparse_bsr_tensor)); |
8002 | m.impl("sparse_bsc_tensor.ccol_row_value_size" , |
8003 | TORCH_FN(wrapper_CompositeImplicitAutograd_ccol_row_value_size_sparse_bsc_tensor)); |
8004 | m.impl("sparse_compressed_tensor.comp_plain_value" , |
8005 | TORCH_FN(wrapper_CompositeImplicitAutograd_comp_plain_value_sparse_compressed_tensor)); |
8006 | m.impl("sparse_csr_tensor.crow_col_value" , |
8007 | TORCH_FN(wrapper_CompositeImplicitAutograd_crow_col_value_sparse_csr_tensor)); |
8008 | m.impl("sparse_csc_tensor.ccol_row_value" , |
8009 | TORCH_FN(wrapper_CompositeImplicitAutograd_ccol_row_value_sparse_csc_tensor)); |
8010 | m.impl("sparse_bsr_tensor.crow_col_value" , |
8011 | TORCH_FN(wrapper_CompositeImplicitAutograd_crow_col_value_sparse_bsr_tensor)); |
8012 | m.impl("sparse_bsc_tensor.ccol_row_value" , |
8013 | TORCH_FN(wrapper_CompositeImplicitAutograd_ccol_row_value_sparse_bsc_tensor)); |
8014 | m.impl("_sparse_compressed_tensor_unsafe" , |
8015 | TORCH_FN(wrapper_CompositeImplicitAutograd___sparse_compressed_tensor_unsafe)); |
8016 | m.impl("_sparse_csr_tensor_unsafe" , |
8017 | TORCH_FN(wrapper_CompositeImplicitAutograd___sparse_csr_tensor_unsafe)); |
8018 | m.impl("_sparse_csc_tensor_unsafe" , |
8019 | TORCH_FN(wrapper_CompositeImplicitAutograd___sparse_csc_tensor_unsafe)); |
8020 | m.impl("_sparse_bsr_tensor_unsafe" , |
8021 | TORCH_FN(wrapper_CompositeImplicitAutograd___sparse_bsr_tensor_unsafe)); |
8022 | m.impl("_sparse_bsc_tensor_unsafe" , |
8023 | TORCH_FN(wrapper_CompositeImplicitAutograd___sparse_bsc_tensor_unsafe)); |
8024 | m.impl("sparse_coo_tensor.indices" , |
8025 | TORCH_FN(wrapper_CompositeImplicitAutograd_indices_sparse_coo_tensor)); |
8026 | m.impl("sparse_coo_tensor.indices_size" , |
8027 | TORCH_FN(wrapper_CompositeImplicitAutograd_indices_size_sparse_coo_tensor)); |
8028 | m.impl("_sparse_coo_tensor_unsafe" , |
8029 | TORCH_FN(wrapper_CompositeImplicitAutograd___sparse_coo_tensor_unsafe)); |
8030 | m.impl("_validate_sparse_coo_tensor_args" , |
8031 | TORCH_FN(wrapper_CompositeImplicitAutograd___validate_sparse_coo_tensor_args)); |
8032 | m.impl("_validate_sparse_compressed_tensor_args" , |
8033 | TORCH_FN(wrapper_CompositeImplicitAutograd___validate_sparse_compressed_tensor_args)); |
8034 | m.impl("_validate_sparse_csr_tensor_args" , |
8035 | TORCH_FN(wrapper_CompositeImplicitAutograd___validate_sparse_csr_tensor_args)); |
8036 | m.impl("_validate_sparse_csc_tensor_args" , |
8037 | TORCH_FN(wrapper_CompositeImplicitAutograd___validate_sparse_csc_tensor_args)); |
8038 | m.impl("_validate_sparse_bsr_tensor_args" , |
8039 | TORCH_FN(wrapper_CompositeImplicitAutograd___validate_sparse_bsr_tensor_args)); |
8040 | m.impl("_validate_sparse_bsc_tensor_args" , |
8041 | TORCH_FN(wrapper_CompositeImplicitAutograd___validate_sparse_bsc_tensor_args)); |
8042 | m.impl("_to_cpu" , |
8043 | TORCH_FN(wrapper_CompositeImplicitAutograd___to_cpu)); |
8044 | m.impl("to_dense" , |
8045 | TORCH_FN(wrapper_CompositeImplicitAutograd__to_dense)); |
8046 | m.impl("to_dense_backward" , |
8047 | TORCH_FN(wrapper_CompositeImplicitAutograd__to_dense_backward)); |
8048 | m.impl("coalesce" , |
8049 | TORCH_FN(wrapper_CompositeImplicitAutograd__coalesce)); |
8050 | m.impl("unbind.Dimname" , |
8051 | TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname_unbind)); |
8052 | m.impl("to_mkldnn_backward" , |
8053 | TORCH_FN(wrapper_CompositeImplicitAutograd__to_mkldnn_backward)); |
8054 | m.impl("fake_quantize_per_tensor_affine" , |
8055 | TORCH_FN(wrapper_CompositeImplicitAutograd__fake_quantize_per_tensor_affine)); |
8056 | m.impl("fake_quantize_per_tensor_affine.tensor_qparams" , |
8057 | TORCH_FN(wrapper_CompositeImplicitAutograd_tensor_qparams_fake_quantize_per_tensor_affine)); |
8058 | m.impl("fake_quantize_per_tensor_affine_cachemask_backward" , |
8059 | TORCH_FN(wrapper_CompositeImplicitAutograd__fake_quantize_per_tensor_affine_cachemask_backward)); |
8060 | m.impl("fake_quantize_per_channel_affine" , |
8061 | TORCH_FN(wrapper_CompositeImplicitAutograd__fake_quantize_per_channel_affine)); |
8062 | m.impl("fake_quantize_per_channel_affine_cachemask_backward" , |
8063 | TORCH_FN(wrapper_CompositeImplicitAutograd__fake_quantize_per_channel_affine_cachemask_backward)); |
8064 | m.impl("fused_moving_avg_obs_fake_quant" , |
8065 | TORCH_FN(wrapper_CompositeImplicitAutograd__fused_moving_avg_obs_fake_quant)); |
8066 | m.impl("_choose_qparams_per_tensor" , |
8067 | TORCH_FN(wrapper_CompositeImplicitAutograd___choose_qparams_per_tensor)); |
8068 | m.impl("_saturate_weight_to_fp16" , |
8069 | TORCH_FN(wrapper_CompositeImplicitAutograd___saturate_weight_to_fp16)); |
8070 | m.impl("choose_qparams_optimized" , |
8071 | TORCH_FN(wrapper_CompositeImplicitAutograd__choose_qparams_optimized)); |
8072 | m.impl("_autocast_to_reduced_precision" , |
8073 | TORCH_FN(wrapper_CompositeImplicitAutograd___autocast_to_reduced_precision)); |
8074 | m.impl("_autocast_to_full_precision" , |
8075 | TORCH_FN(wrapper_CompositeImplicitAutograd___autocast_to_full_precision)); |
8076 | m.impl("to.dtype_layout" , |
8077 | TORCH_FN(wrapper_CompositeImplicitAutograd_dtype_layout_to)); |
8078 | m.impl("to.device" , |
8079 | TORCH_FN(wrapper_CompositeImplicitAutograd_device_to)); |
8080 | m.impl("to.dtype" , |
8081 | TORCH_FN(wrapper_CompositeImplicitAutograd_dtype_to)); |
8082 | m.impl("to.other" , |
8083 | TORCH_FN(wrapper_CompositeImplicitAutograd_other_to)); |
8084 | m.impl("meshgrid" , |
8085 | TORCH_FN(wrapper_CompositeImplicitAutograd__meshgrid)); |
8086 | m.impl("meshgrid.indexing" , |
8087 | TORCH_FN(wrapper_CompositeImplicitAutograd_indexing_meshgrid)); |
8088 | m.impl("cartesian_prod" , |
8089 | TORCH_FN(wrapper_CompositeImplicitAutograd__cartesian_prod)); |
8090 | m.impl("combinations" , |
8091 | TORCH_FN(wrapper_CompositeImplicitAutograd__combinations)); |
8092 | m.impl("item" , |
8093 | TORCH_FN(wrapper_CompositeImplicitAutograd__item)); |
8094 | m.impl("result_type.Tensor" , |
8095 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_result_type)); |
8096 | m.impl("result_type.Scalar" , |
8097 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_result_type)); |
8098 | m.impl("result_type.Scalar_Tensor" , |
8099 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_Tensor_result_type)); |
8100 | m.impl("result_type.Scalar_Scalar" , |
8101 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_Scalar_result_type)); |
8102 | m.impl("can_cast" , |
8103 | TORCH_FN(wrapper_CompositeImplicitAutograd__can_cast)); |
8104 | m.impl("promote_types" , |
8105 | TORCH_FN(wrapper_CompositeImplicitAutograd__promote_types)); |
8106 | m.impl("_thnn_fused_lstm_cell_backward" , |
8107 | TORCH_FN(wrapper_CompositeImplicitAutograd___thnn_fused_lstm_cell_backward)); |
8108 | m.impl("_thnn_differentiable_lstm_cell_backward" , |
8109 | TORCH_FN(wrapper_CompositeImplicitAutograd___thnn_differentiable_lstm_cell_backward)); |
8110 | m.impl("_thnn_differentiable_gru_cell_backward" , |
8111 | TORCH_FN(wrapper_CompositeImplicitAutograd___thnn_differentiable_gru_cell_backward)); |
8112 | m.impl("lstm.input" , |
8113 | TORCH_FN(wrapper_CompositeImplicitAutograd_input_lstm)); |
8114 | m.impl("lstm.data" , |
8115 | TORCH_FN(wrapper_CompositeImplicitAutograd_data_lstm)); |
8116 | m.impl("gru.input" , |
8117 | TORCH_FN(wrapper_CompositeImplicitAutograd_input_gru)); |
8118 | m.impl("gru.data" , |
8119 | TORCH_FN(wrapper_CompositeImplicitAutograd_data_gru)); |
8120 | m.impl("rnn_tanh.input" , |
8121 | TORCH_FN(wrapper_CompositeImplicitAutograd_input_rnn_tanh)); |
8122 | m.impl("rnn_tanh.data" , |
8123 | TORCH_FN(wrapper_CompositeImplicitAutograd_data_rnn_tanh)); |
8124 | m.impl("rnn_relu.input" , |
8125 | TORCH_FN(wrapper_CompositeImplicitAutograd_input_rnn_relu)); |
8126 | m.impl("rnn_relu.data" , |
8127 | TORCH_FN(wrapper_CompositeImplicitAutograd_data_rnn_relu)); |
8128 | m.impl("lstm_cell" , |
8129 | TORCH_FN(wrapper_CompositeImplicitAutograd__lstm_cell)); |
8130 | m.impl("gru_cell" , |
8131 | TORCH_FN(wrapper_CompositeImplicitAutograd__gru_cell)); |
8132 | m.impl("rnn_tanh_cell" , |
8133 | TORCH_FN(wrapper_CompositeImplicitAutograd__rnn_tanh_cell)); |
8134 | m.impl("rnn_relu_cell" , |
8135 | TORCH_FN(wrapper_CompositeImplicitAutograd__rnn_relu_cell)); |
8136 | m.impl("quantized_lstm_cell" , |
8137 | TORCH_FN(wrapper_CompositeImplicitAutograd__quantized_lstm_cell)); |
8138 | m.impl("quantized_gru_cell" , |
8139 | TORCH_FN(wrapper_CompositeImplicitAutograd__quantized_gru_cell)); |
8140 | m.impl("quantized_rnn_relu_cell" , |
8141 | TORCH_FN(wrapper_CompositeImplicitAutograd__quantized_rnn_relu_cell)); |
8142 | m.impl("quantized_rnn_tanh_cell" , |
8143 | TORCH_FN(wrapper_CompositeImplicitAutograd__quantized_rnn_tanh_cell)); |
8144 | m.impl("_pack_padded_sequence_backward" , |
8145 | TORCH_FN(wrapper_CompositeImplicitAutograd___pack_padded_sequence_backward)); |
8146 | m.impl("_pad_packed_sequence" , |
8147 | TORCH_FN(wrapper_CompositeImplicitAutograd___pad_packed_sequence)); |
8148 | m.impl("set_.source_Tensor_storage_offset" , |
8149 | TORCH_FN(wrapper_CompositeImplicitAutograd_source_Tensor_storage_offset_set_)); |
8150 | m.impl("index_add.dimname" , |
8151 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_index_add)); |
8152 | m.impl("index_fill_.Dimname_Scalar" , |
8153 | TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname_Scalar_index_fill_)); |
8154 | m.impl("index_fill.Dimname_Scalar" , |
8155 | TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname_Scalar_index_fill)); |
8156 | m.impl("index_fill_.Dimname_Tensor" , |
8157 | TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname_Tensor_index_fill_)); |
8158 | m.impl("index_fill.Dimname_Tensor" , |
8159 | TORCH_FN(wrapper_CompositeImplicitAutograd_Dimname_Tensor_index_fill)); |
8160 | m.impl("scatter.dimname_src" , |
8161 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_src_scatter)); |
8162 | m.impl("scatter.dimname_value" , |
8163 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_value_scatter)); |
8164 | m.impl("scatter_add.dimname" , |
8165 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_scatter_add)); |
8166 | m.impl("bitwise_and_.Scalar" , |
8167 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_bitwise_and_)); |
8168 | m.impl("__and__.Scalar" , |
8169 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar___and__)); |
8170 | m.impl("__iand__.Scalar" , |
8171 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar___iand__)); |
8172 | m.impl("__and__.Tensor" , |
8173 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor___and__)); |
8174 | m.impl("__iand__.Tensor" , |
8175 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor___iand__)); |
8176 | m.impl("bitwise_or.Scalar" , |
8177 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_bitwise_or)); |
8178 | m.impl("bitwise_or_.Scalar" , |
8179 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_bitwise_or_)); |
8180 | m.impl("__or__.Scalar" , |
8181 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar___or__)); |
8182 | m.impl("__ior__.Scalar" , |
8183 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar___ior__)); |
8184 | m.impl("__or__.Tensor" , |
8185 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor___or__)); |
8186 | m.impl("__ior__.Tensor" , |
8187 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor___ior__)); |
8188 | m.impl("bitwise_xor.Scalar" , |
8189 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_bitwise_xor)); |
8190 | m.impl("bitwise_xor_.Scalar" , |
8191 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_bitwise_xor_)); |
8192 | m.impl("__xor__.Scalar" , |
8193 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar___xor__)); |
8194 | m.impl("__ixor__.Scalar" , |
8195 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar___ixor__)); |
8196 | m.impl("__xor__.Tensor" , |
8197 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor___xor__)); |
8198 | m.impl("__ixor__.Tensor" , |
8199 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor___ixor__)); |
8200 | m.impl("diag" , |
8201 | TORCH_FN(wrapper_CompositeImplicitAutograd__diag)); |
8202 | m.impl("diag.out" , |
8203 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_diag_out)); |
8204 | m.impl("cross" , |
8205 | TORCH_FN(wrapper_CompositeImplicitAutograd__cross)); |
8206 | m.impl("cross.out" , |
8207 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_cross_out)); |
8208 | m.impl("trace_backward" , |
8209 | TORCH_FN(wrapper_CompositeImplicitAutograd__trace_backward)); |
8210 | m.impl("not_equal.Scalar" , |
8211 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_not_equal)); |
8212 | m.impl("not_equal.Scalar_out" , |
8213 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_out_not_equal_out)); |
8214 | m.impl("not_equal_.Scalar" , |
8215 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_not_equal_)); |
8216 | m.impl("not_equal.Tensor" , |
8217 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_not_equal)); |
8218 | m.impl("not_equal.Tensor_out" , |
8219 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_out_not_equal_out)); |
8220 | m.impl("not_equal_.Tensor" , |
8221 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_not_equal_)); |
8222 | m.impl("greater_equal.Scalar" , |
8223 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_greater_equal)); |
8224 | m.impl("greater_equal.Scalar_out" , |
8225 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_out_greater_equal_out)); |
8226 | m.impl("greater_equal_.Scalar" , |
8227 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_greater_equal_)); |
8228 | m.impl("greater_equal.Tensor" , |
8229 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_greater_equal)); |
8230 | m.impl("greater_equal.Tensor_out" , |
8231 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_out_greater_equal_out)); |
8232 | m.impl("greater_equal_.Tensor" , |
8233 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_greater_equal_)); |
8234 | m.impl("less_equal.Scalar" , |
8235 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_less_equal)); |
8236 | m.impl("less_equal.Scalar_out" , |
8237 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_out_less_equal_out)); |
8238 | m.impl("less_equal_.Scalar" , |
8239 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_less_equal_)); |
8240 | m.impl("less_equal.Tensor" , |
8241 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_less_equal)); |
8242 | m.impl("less_equal.Tensor_out" , |
8243 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_out_less_equal_out)); |
8244 | m.impl("less_equal_.Tensor" , |
8245 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_less_equal_)); |
8246 | m.impl("greater.Scalar" , |
8247 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_greater)); |
8248 | m.impl("greater.Scalar_out" , |
8249 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_out_greater_out)); |
8250 | m.impl("greater_.Scalar" , |
8251 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_greater_)); |
8252 | m.impl("greater.Tensor" , |
8253 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_greater)); |
8254 | m.impl("greater.Tensor_out" , |
8255 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_out_greater_out)); |
8256 | m.impl("greater_.Tensor" , |
8257 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_greater_)); |
8258 | m.impl("less.Scalar" , |
8259 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_less)); |
8260 | m.impl("less.Scalar_out" , |
8261 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_out_less_out)); |
8262 | m.impl("less_.Scalar" , |
8263 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_less_)); |
8264 | m.impl("less.Tensor" , |
8265 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_less)); |
8266 | m.impl("less.Tensor_out" , |
8267 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_out_less_out)); |
8268 | m.impl("less_.Tensor" , |
8269 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_less_)); |
8270 | m.impl("take_along_dim" , |
8271 | TORCH_FN(wrapper_CompositeImplicitAutograd__take_along_dim)); |
8272 | m.impl("take_along_dim.out" , |
8273 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_take_along_dim_out)); |
8274 | m.impl("index_select.dimname" , |
8275 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_index_select)); |
8276 | m.impl("index_select.dimname_out" , |
8277 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_out_index_select_out)); |
8278 | m.impl("index_select_backward" , |
8279 | TORCH_FN(wrapper_CompositeImplicitAutograd__index_select_backward)); |
8280 | m.impl("masked_select_backward" , |
8281 | TORCH_FN(wrapper_CompositeImplicitAutograd__masked_select_backward)); |
8282 | m.impl("nonzero_numpy" , |
8283 | TORCH_FN(wrapper_CompositeImplicitAutograd__nonzero_numpy)); |
8284 | m.impl("argwhere" , |
8285 | TORCH_FN(wrapper_CompositeImplicitAutograd__argwhere)); |
8286 | m.impl("gather_backward" , |
8287 | TORCH_FN(wrapper_CompositeImplicitAutograd__gather_backward)); |
8288 | m.impl("gather.dimname" , |
8289 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_gather)); |
8290 | m.impl("gather.dimname_out" , |
8291 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_out_gather_out)); |
8292 | m.impl("_gather_sparse_backward" , |
8293 | TORCH_FN(wrapper_CompositeImplicitAutograd___gather_sparse_backward)); |
8294 | m.impl("cross_entropy_loss" , |
8295 | TORCH_FN(wrapper_CompositeImplicitAutograd__cross_entropy_loss)); |
8296 | m.impl("linalg_vander" , |
8297 | TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_vander)); |
8298 | m.impl("svd" , |
8299 | TORCH_FN(wrapper_CompositeImplicitAutograd__svd)); |
8300 | m.impl("svd.U" , |
8301 | TORCH_FN(wrapper_CompositeImplicitAutograd_U_svd_out)); |
8302 | m.impl("swapaxes" , |
8303 | TORCH_FN(wrapper_CompositeImplicitAutograd__swapaxes)); |
8304 | m.impl("swapaxes_" , |
8305 | TORCH_FN(wrapper_CompositeImplicitAutograd__swapaxes_)); |
8306 | m.impl("swapdims" , |
8307 | TORCH_FN(wrapper_CompositeImplicitAutograd__swapdims)); |
8308 | m.impl("swapdims_" , |
8309 | TORCH_FN(wrapper_CompositeImplicitAutograd__swapdims_)); |
8310 | m.impl("qr" , |
8311 | TORCH_FN(wrapper_CompositeImplicitAutograd__qr)); |
8312 | m.impl("qr.Q" , |
8313 | TORCH_FN(wrapper_CompositeImplicitAutograd_Q_qr_out)); |
8314 | m.impl("orgqr" , |
8315 | TORCH_FN(wrapper_CompositeImplicitAutograd__orgqr)); |
8316 | m.impl("orgqr.out" , |
8317 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_orgqr_out)); |
8318 | m.impl("_lu_with_info" , |
8319 | TORCH_FN(wrapper_CompositeImplicitAutograd___lu_with_info)); |
8320 | m.impl("lu_solve" , |
8321 | TORCH_FN(wrapper_CompositeImplicitAutograd__lu_solve)); |
8322 | m.impl("lu_solve.out" , |
8323 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_lu_solve_out)); |
8324 | m.impl("arctan2" , |
8325 | TORCH_FN(wrapper_CompositeImplicitAutograd__arctan2)); |
8326 | m.impl("arctan2.out" , |
8327 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_arctan2_out)); |
8328 | m.impl("arctan2_" , |
8329 | TORCH_FN(wrapper_CompositeImplicitAutograd__arctan2_)); |
8330 | m.impl("histogramdd" , |
8331 | TORCH_FN(wrapper_CompositeImplicitAutograd__histogramdd)); |
8332 | m.impl("histogramdd.int_bins" , |
8333 | TORCH_FN(wrapper_CompositeImplicitAutograd_int_bins_histogramdd)); |
8334 | m.impl("histogramdd.TensorList_bins" , |
8335 | TORCH_FN(wrapper_CompositeImplicitAutograd_TensorList_bins_histogramdd)); |
8336 | m.impl("max.other" , |
8337 | TORCH_FN(wrapper_CompositeImplicitAutograd_other_max)); |
8338 | m.impl("max.out" , |
8339 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_max_out)); |
8340 | m.impl("min.other" , |
8341 | TORCH_FN(wrapper_CompositeImplicitAutograd_other_min)); |
8342 | m.impl("min.out" , |
8343 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_min_out)); |
8344 | m.impl("quantile" , |
8345 | TORCH_FN(wrapper_CompositeImplicitAutograd__quantile)); |
8346 | m.impl("quantile.out" , |
8347 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_quantile_out)); |
8348 | m.impl("quantile.scalar" , |
8349 | TORCH_FN(wrapper_CompositeImplicitAutograd_scalar_quantile)); |
8350 | m.impl("quantile.scalar_out" , |
8351 | TORCH_FN(wrapper_CompositeImplicitAutograd_scalar_out_quantile_out)); |
8352 | m.impl("nanquantile" , |
8353 | TORCH_FN(wrapper_CompositeImplicitAutograd__nanquantile)); |
8354 | m.impl("nanquantile.out" , |
8355 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_nanquantile_out)); |
8356 | m.impl("nanquantile.scalar" , |
8357 | TORCH_FN(wrapper_CompositeImplicitAutograd_scalar_nanquantile)); |
8358 | m.impl("nanquantile.scalar_out" , |
8359 | TORCH_FN(wrapper_CompositeImplicitAutograd_scalar_out_nanquantile_out)); |
8360 | m.impl("sort.dimname" , |
8361 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_sort)); |
8362 | m.impl("sort.dimname_values" , |
8363 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_values_sort_out)); |
8364 | m.impl("sort.dimname_stable" , |
8365 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_stable_sort)); |
8366 | m.impl("sort.dimname_values_stable" , |
8367 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_values_stable_sort_out)); |
8368 | m.impl("msort" , |
8369 | TORCH_FN(wrapper_CompositeImplicitAutograd__msort)); |
8370 | m.impl("msort.out" , |
8371 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_msort_out)); |
8372 | m.impl("argsort" , |
8373 | TORCH_FN(wrapper_CompositeImplicitAutograd__argsort)); |
8374 | m.impl("argsort.dimname" , |
8375 | TORCH_FN(wrapper_CompositeImplicitAutograd_dimname_argsort)); |
8376 | m.impl("float_power.Tensor_Tensor" , |
8377 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_Tensor_float_power)); |
8378 | m.impl("float_power.Tensor_Tensor_out" , |
8379 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_Tensor_out_float_power_out)); |
8380 | m.impl("float_power_.Tensor" , |
8381 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_float_power_)); |
8382 | m.impl("float_power.Scalar" , |
8383 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_float_power)); |
8384 | m.impl("float_power.Scalar_out" , |
8385 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_out_float_power_out)); |
8386 | m.impl("float_power.Tensor_Scalar" , |
8387 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_Scalar_float_power)); |
8388 | m.impl("float_power.Tensor_Scalar_out" , |
8389 | TORCH_FN(wrapper_CompositeImplicitAutograd_Tensor_Scalar_out_float_power_out)); |
8390 | m.impl("float_power_.Scalar" , |
8391 | TORCH_FN(wrapper_CompositeImplicitAutograd_Scalar_float_power_)); |
8392 | m.impl("l1_loss" , |
8393 | TORCH_FN(wrapper_CompositeImplicitAutograd__l1_loss)); |
8394 | m.impl("multilabel_margin_loss" , |
8395 | TORCH_FN(wrapper_CompositeImplicitAutograd__multilabel_margin_loss)); |
8396 | m.impl("multilabel_margin_loss.out" , |
8397 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_multilabel_margin_loss_out)); |
8398 | m.impl("nll_loss" , |
8399 | TORCH_FN(wrapper_CompositeImplicitAutograd__nll_loss)); |
8400 | m.impl("nll_loss.out" , |
8401 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_nll_loss_out)); |
8402 | m.impl("nll_loss_nd" , |
8403 | TORCH_FN(wrapper_CompositeImplicitAutograd__nll_loss_nd)); |
8404 | m.impl("nll_loss2d" , |
8405 | TORCH_FN(wrapper_CompositeImplicitAutograd__nll_loss2d)); |
8406 | m.impl("nll_loss2d.out" , |
8407 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_nll_loss2d_out)); |
8408 | m.impl("log_sigmoid" , |
8409 | TORCH_FN(wrapper_CompositeImplicitAutograd__log_sigmoid)); |
8410 | m.impl("log_sigmoid.out" , |
8411 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_log_sigmoid_out)); |
8412 | m.impl("adaptive_avg_pool2d" , |
8413 | TORCH_FN(wrapper_CompositeImplicitAutograd__adaptive_avg_pool2d)); |
8414 | m.impl("adaptive_avg_pool3d" , |
8415 | TORCH_FN(wrapper_CompositeImplicitAutograd__adaptive_avg_pool3d)); |
8416 | m.impl("_pad_circular" , |
8417 | TORCH_FN(wrapper_CompositeImplicitAutograd___pad_circular)); |
8418 | m.impl("_pad_enum" , |
8419 | TORCH_FN(wrapper_CompositeImplicitAutograd___pad_enum)); |
8420 | m.impl("pad" , |
8421 | TORCH_FN(wrapper_CompositeImplicitAutograd__pad)); |
8422 | m.impl("upsample_linear1d.vec" , |
8423 | TORCH_FN(wrapper_CompositeImplicitAutograd_vec_upsample_linear1d)); |
8424 | m.impl("upsample_bilinear2d.vec" , |
8425 | TORCH_FN(wrapper_CompositeImplicitAutograd_vec_upsample_bilinear2d)); |
8426 | m.impl("_upsample_bilinear2d_aa.vec" , |
8427 | TORCH_FN(wrapper_CompositeImplicitAutograd_vec__upsample_bilinear2d_aa)); |
8428 | m.impl("upsample_trilinear3d.vec" , |
8429 | TORCH_FN(wrapper_CompositeImplicitAutograd_vec_upsample_trilinear3d)); |
8430 | m.impl("upsample_bicubic2d.vec" , |
8431 | TORCH_FN(wrapper_CompositeImplicitAutograd_vec_upsample_bicubic2d)); |
8432 | m.impl("_upsample_bicubic2d_aa.vec" , |
8433 | TORCH_FN(wrapper_CompositeImplicitAutograd_vec__upsample_bicubic2d_aa)); |
8434 | m.impl("upsample_nearest1d.vec" , |
8435 | TORCH_FN(wrapper_CompositeImplicitAutograd_vec_upsample_nearest1d)); |
8436 | m.impl("_upsample_nearest_exact1d.vec" , |
8437 | TORCH_FN(wrapper_CompositeImplicitAutograd_vec__upsample_nearest_exact1d)); |
8438 | m.impl("upsample_nearest2d.vec" , |
8439 | TORCH_FN(wrapper_CompositeImplicitAutograd_vec_upsample_nearest2d)); |
8440 | m.impl("_upsample_nearest_exact2d.vec" , |
8441 | TORCH_FN(wrapper_CompositeImplicitAutograd_vec__upsample_nearest_exact2d)); |
8442 | m.impl("upsample_nearest3d.vec" , |
8443 | TORCH_FN(wrapper_CompositeImplicitAutograd_vec_upsample_nearest3d)); |
8444 | m.impl("_upsample_nearest_exact3d.vec" , |
8445 | TORCH_FN(wrapper_CompositeImplicitAutograd_vec__upsample_nearest_exact3d)); |
8446 | m.impl("thnn_conv2d" , |
8447 | TORCH_FN(wrapper_CompositeImplicitAutograd__thnn_conv2d)); |
8448 | m.impl("thnn_conv2d.out" , |
8449 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_thnn_conv2d_out)); |
8450 | m.impl("slow_conv3d" , |
8451 | TORCH_FN(wrapper_CompositeImplicitAutograd__slow_conv3d)); |
8452 | m.impl("slow_conv3d.out" , |
8453 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_slow_conv3d_out)); |
8454 | m.impl("column_stack" , |
8455 | TORCH_FN(wrapper_CompositeImplicitAutograd__column_stack)); |
8456 | m.impl("column_stack.out" , |
8457 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_column_stack_out)); |
8458 | m.impl("isfinite" , |
8459 | TORCH_FN(wrapper_CompositeImplicitAutograd__isfinite)); |
8460 | m.impl("_add_batch_dim" , |
8461 | TORCH_FN(wrapper_CompositeImplicitAutograd___add_batch_dim)); |
8462 | m.impl("_remove_batch_dim" , |
8463 | TORCH_FN(wrapper_CompositeImplicitAutograd___remove_batch_dim)); |
8464 | m.impl("special_expm1" , |
8465 | TORCH_FN(wrapper_CompositeImplicitAutograd__special_expm1)); |
8466 | m.impl("special_expm1.out" , |
8467 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_expm1_out)); |
8468 | m.impl("special_exp2" , |
8469 | TORCH_FN(wrapper_CompositeImplicitAutograd__special_exp2)); |
8470 | m.impl("special_exp2.out" , |
8471 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_exp2_out)); |
8472 | m.impl("special_psi" , |
8473 | TORCH_FN(wrapper_CompositeImplicitAutograd__special_psi)); |
8474 | m.impl("special_psi.out" , |
8475 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_psi_out)); |
8476 | m.impl("special_digamma" , |
8477 | TORCH_FN(wrapper_CompositeImplicitAutograd__special_digamma)); |
8478 | m.impl("special_digamma.out" , |
8479 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_digamma_out)); |
8480 | m.impl("special_gammaln" , |
8481 | TORCH_FN(wrapper_CompositeImplicitAutograd__special_gammaln)); |
8482 | m.impl("special_gammaln.out" , |
8483 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_gammaln_out)); |
8484 | m.impl("special_erf" , |
8485 | TORCH_FN(wrapper_CompositeImplicitAutograd__special_erf)); |
8486 | m.impl("special_erf.out" , |
8487 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_erf_out)); |
8488 | m.impl("special_erfc" , |
8489 | TORCH_FN(wrapper_CompositeImplicitAutograd__special_erfc)); |
8490 | m.impl("special_erfc.out" , |
8491 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_erfc_out)); |
8492 | m.impl("special_erfinv" , |
8493 | TORCH_FN(wrapper_CompositeImplicitAutograd__special_erfinv)); |
8494 | m.impl("special_erfinv.out" , |
8495 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_erfinv_out)); |
8496 | m.impl("special_ndtr" , |
8497 | TORCH_FN(wrapper_CompositeImplicitAutograd__special_ndtr)); |
8498 | m.impl("special_ndtr.out" , |
8499 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_ndtr_out)); |
8500 | m.impl("special_xlogy" , |
8501 | TORCH_FN(wrapper_CompositeImplicitAutograd__special_xlogy)); |
8502 | m.impl("special_xlogy.out" , |
8503 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_xlogy_out)); |
8504 | m.impl("special_xlogy.self_scalar" , |
8505 | TORCH_FN(wrapper_CompositeImplicitAutograd_self_scalar_special_xlogy)); |
8506 | m.impl("special_xlogy.self_scalar_out" , |
8507 | TORCH_FN(wrapper_CompositeImplicitAutograd_self_scalar_out_special_xlogy_out)); |
8508 | m.impl("special_xlogy.other_scalar" , |
8509 | TORCH_FN(wrapper_CompositeImplicitAutograd_other_scalar_special_xlogy)); |
8510 | m.impl("special_xlogy.other_scalar_out" , |
8511 | TORCH_FN(wrapper_CompositeImplicitAutograd_other_scalar_out_special_xlogy_out)); |
8512 | m.impl("special_i0" , |
8513 | TORCH_FN(wrapper_CompositeImplicitAutograd__special_i0)); |
8514 | m.impl("special_i0.out" , |
8515 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_i0_out)); |
8516 | m.impl("special_logit" , |
8517 | TORCH_FN(wrapper_CompositeImplicitAutograd__special_logit)); |
8518 | m.impl("special_logit.out" , |
8519 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_logit_out)); |
8520 | m.impl("special_polygamma" , |
8521 | TORCH_FN(wrapper_CompositeImplicitAutograd__special_polygamma)); |
8522 | m.impl("special_polygamma.out" , |
8523 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_polygamma_out)); |
8524 | m.impl("special_logsumexp" , |
8525 | TORCH_FN(wrapper_CompositeImplicitAutograd__special_logsumexp)); |
8526 | m.impl("special_logsumexp.out" , |
8527 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_logsumexp_out)); |
8528 | m.impl("special_expit" , |
8529 | TORCH_FN(wrapper_CompositeImplicitAutograd__special_expit)); |
8530 | m.impl("special_expit.out" , |
8531 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_expit_out)); |
8532 | m.impl("special_sinc" , |
8533 | TORCH_FN(wrapper_CompositeImplicitAutograd__special_sinc)); |
8534 | m.impl("special_sinc.out" , |
8535 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_sinc_out)); |
8536 | m.impl("special_round" , |
8537 | TORCH_FN(wrapper_CompositeImplicitAutograd__special_round)); |
8538 | m.impl("special_round.out" , |
8539 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_round_out)); |
8540 | m.impl("special_log1p" , |
8541 | TORCH_FN(wrapper_CompositeImplicitAutograd__special_log1p)); |
8542 | m.impl("special_log1p.out" , |
8543 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_log1p_out)); |
8544 | m.impl("special_log_softmax" , |
8545 | TORCH_FN(wrapper_CompositeImplicitAutograd__special_log_softmax)); |
8546 | m.impl("special_gammainc" , |
8547 | TORCH_FN(wrapper_CompositeImplicitAutograd__special_gammainc)); |
8548 | m.impl("special_gammainc.out" , |
8549 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_gammainc_out)); |
8550 | m.impl("special_gammaincc" , |
8551 | TORCH_FN(wrapper_CompositeImplicitAutograd__special_gammaincc)); |
8552 | m.impl("special_gammaincc.out" , |
8553 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_gammaincc_out)); |
8554 | m.impl("special_multigammaln" , |
8555 | TORCH_FN(wrapper_CompositeImplicitAutograd__special_multigammaln)); |
8556 | m.impl("special_multigammaln.out" , |
8557 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_special_multigammaln_out)); |
8558 | m.impl("special_softmax" , |
8559 | TORCH_FN(wrapper_CompositeImplicitAutograd__special_softmax)); |
8560 | m.impl("fft_fft" , |
8561 | TORCH_FN(wrapper_CompositeImplicitAutograd__fft_fft)); |
8562 | m.impl("fft_fft.out" , |
8563 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_fft_out)); |
8564 | m.impl("fft_ifft" , |
8565 | TORCH_FN(wrapper_CompositeImplicitAutograd__fft_ifft)); |
8566 | m.impl("fft_ifft.out" , |
8567 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_ifft_out)); |
8568 | m.impl("fft_rfft" , |
8569 | TORCH_FN(wrapper_CompositeImplicitAutograd__fft_rfft)); |
8570 | m.impl("fft_rfft.out" , |
8571 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_rfft_out)); |
8572 | m.impl("fft_irfft" , |
8573 | TORCH_FN(wrapper_CompositeImplicitAutograd__fft_irfft)); |
8574 | m.impl("fft_irfft.out" , |
8575 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_irfft_out)); |
8576 | m.impl("fft_hfft" , |
8577 | TORCH_FN(wrapper_CompositeImplicitAutograd__fft_hfft)); |
8578 | m.impl("fft_hfft.out" , |
8579 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_hfft_out)); |
8580 | m.impl("fft_ihfft" , |
8581 | TORCH_FN(wrapper_CompositeImplicitAutograd__fft_ihfft)); |
8582 | m.impl("fft_ihfft.out" , |
8583 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_ihfft_out)); |
8584 | m.impl("fft_fft2" , |
8585 | TORCH_FN(wrapper_CompositeImplicitAutograd__fft_fft2)); |
8586 | m.impl("fft_fft2.out" , |
8587 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_fft2_out)); |
8588 | m.impl("fft_ifft2" , |
8589 | TORCH_FN(wrapper_CompositeImplicitAutograd__fft_ifft2)); |
8590 | m.impl("fft_ifft2.out" , |
8591 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_ifft2_out)); |
8592 | m.impl("fft_rfft2" , |
8593 | TORCH_FN(wrapper_CompositeImplicitAutograd__fft_rfft2)); |
8594 | m.impl("fft_rfft2.out" , |
8595 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_rfft2_out)); |
8596 | m.impl("fft_irfft2" , |
8597 | TORCH_FN(wrapper_CompositeImplicitAutograd__fft_irfft2)); |
8598 | m.impl("fft_irfft2.out" , |
8599 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_irfft2_out)); |
8600 | m.impl("fft_hfft2" , |
8601 | TORCH_FN(wrapper_CompositeImplicitAutograd__fft_hfft2)); |
8602 | m.impl("fft_hfft2.out" , |
8603 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_hfft2_out)); |
8604 | m.impl("fft_ihfft2" , |
8605 | TORCH_FN(wrapper_CompositeImplicitAutograd__fft_ihfft2)); |
8606 | m.impl("fft_ihfft2.out" , |
8607 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_ihfft2_out)); |
8608 | m.impl("fft_fftn" , |
8609 | TORCH_FN(wrapper_CompositeImplicitAutograd__fft_fftn)); |
8610 | m.impl("fft_fftn.out" , |
8611 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_fftn_out)); |
8612 | m.impl("fft_ifftn" , |
8613 | TORCH_FN(wrapper_CompositeImplicitAutograd__fft_ifftn)); |
8614 | m.impl("fft_ifftn.out" , |
8615 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_ifftn_out)); |
8616 | m.impl("fft_rfftn" , |
8617 | TORCH_FN(wrapper_CompositeImplicitAutograd__fft_rfftn)); |
8618 | m.impl("fft_rfftn.out" , |
8619 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_rfftn_out)); |
8620 | m.impl("fft_irfftn" , |
8621 | TORCH_FN(wrapper_CompositeImplicitAutograd__fft_irfftn)); |
8622 | m.impl("fft_irfftn.out" , |
8623 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_irfftn_out)); |
8624 | m.impl("fft_hfftn" , |
8625 | TORCH_FN(wrapper_CompositeImplicitAutograd__fft_hfftn)); |
8626 | m.impl("fft_hfftn.out" , |
8627 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_hfftn_out)); |
8628 | m.impl("fft_ihfftn" , |
8629 | TORCH_FN(wrapper_CompositeImplicitAutograd__fft_ihfftn)); |
8630 | m.impl("fft_ihfftn.out" , |
8631 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_fft_ihfftn_out)); |
8632 | m.impl("fft_fftshift" , |
8633 | TORCH_FN(wrapper_CompositeImplicitAutograd__fft_fftshift)); |
8634 | m.impl("fft_ifftshift" , |
8635 | TORCH_FN(wrapper_CompositeImplicitAutograd__fft_ifftshift)); |
8636 | m.impl("linalg_cholesky" , |
8637 | TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_cholesky)); |
8638 | m.impl("linalg_cholesky.out" , |
8639 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_cholesky_out)); |
8640 | m.impl("linalg_lu_factor" , |
8641 | TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_lu_factor)); |
8642 | m.impl("linalg_lu_factor.out" , |
8643 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_lu_factor_out)); |
8644 | m.impl("linalg_det" , |
8645 | TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_det)); |
8646 | m.impl("linalg_det.out" , |
8647 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_det_out)); |
8648 | m.impl("det" , |
8649 | TORCH_FN(wrapper_CompositeImplicitAutograd__det)); |
8650 | m.impl("linalg_ldl_factor" , |
8651 | TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_ldl_factor)); |
8652 | m.impl("linalg_ldl_factor.out" , |
8653 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_ldl_factor_out)); |
8654 | m.impl("linalg_matmul" , |
8655 | TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_matmul)); |
8656 | m.impl("linalg_matmul.out" , |
8657 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_matmul_out)); |
8658 | m.impl("linalg_vecdot" , |
8659 | TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_vecdot)); |
8660 | m.impl("linalg_vecdot.out" , |
8661 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_vecdot_out)); |
8662 | m.impl("linalg_slogdet" , |
8663 | TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_slogdet)); |
8664 | m.impl("linalg_slogdet.out" , |
8665 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_slogdet_out)); |
8666 | m.impl("slogdet" , |
8667 | TORCH_FN(wrapper_CompositeImplicitAutograd__slogdet)); |
8668 | m.impl("slogdet.out" , |
8669 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_slogdet_out)); |
8670 | m.impl("logdet" , |
8671 | TORCH_FN(wrapper_CompositeImplicitAutograd__logdet)); |
8672 | m.impl("linalg_eigvals" , |
8673 | TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_eigvals)); |
8674 | m.impl("linalg_eigvals.out" , |
8675 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_eigvals_out)); |
8676 | m.impl("linalg_eigh" , |
8677 | TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_eigh)); |
8678 | m.impl("linalg_eigh.eigvals" , |
8679 | TORCH_FN(wrapper_CompositeImplicitAutograd_eigvals_linalg_eigh_out)); |
8680 | m.impl("linalg_eigvalsh" , |
8681 | TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_eigvalsh)); |
8682 | m.impl("linalg_eigvalsh.out" , |
8683 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_eigvalsh_out)); |
8684 | m.impl("linalg_inv" , |
8685 | TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_inv)); |
8686 | m.impl("linalg_inv.out" , |
8687 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_inv_out)); |
8688 | m.impl("inverse" , |
8689 | TORCH_FN(wrapper_CompositeImplicitAutograd__inverse)); |
8690 | m.impl("inverse.out" , |
8691 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_inverse_out)); |
8692 | m.impl("inner" , |
8693 | TORCH_FN(wrapper_CompositeImplicitAutograd__inner)); |
8694 | m.impl("inner.out" , |
8695 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_inner_out)); |
8696 | m.impl("outer" , |
8697 | TORCH_FN(wrapper_CompositeImplicitAutograd__outer)); |
8698 | m.impl("outer.out" , |
8699 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_outer_out)); |
8700 | m.impl("ger" , |
8701 | TORCH_FN(wrapper_CompositeImplicitAutograd__ger)); |
8702 | m.impl("ger.out" , |
8703 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_ger_out)); |
8704 | m.impl("linalg_norm" , |
8705 | TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_norm)); |
8706 | m.impl("linalg_norm.out" , |
8707 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_norm_out)); |
8708 | m.impl("linalg_norm.ord_str" , |
8709 | TORCH_FN(wrapper_CompositeImplicitAutograd_ord_str_linalg_norm)); |
8710 | m.impl("linalg_norm.ord_str_out" , |
8711 | TORCH_FN(wrapper_CompositeImplicitAutograd_ord_str_out_linalg_norm_out)); |
8712 | m.impl("linalg_matrix_norm" , |
8713 | TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_matrix_norm)); |
8714 | m.impl("linalg_matrix_norm.out" , |
8715 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_matrix_norm_out)); |
8716 | m.impl("linalg_matrix_norm.str_ord" , |
8717 | TORCH_FN(wrapper_CompositeImplicitAutograd_str_ord_linalg_matrix_norm)); |
8718 | m.impl("linalg_matrix_norm.str_ord_out" , |
8719 | TORCH_FN(wrapper_CompositeImplicitAutograd_str_ord_out_linalg_matrix_norm_out)); |
8720 | m.impl("linalg_svd" , |
8721 | TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_svd)); |
8722 | m.impl("linalg_svd.U" , |
8723 | TORCH_FN(wrapper_CompositeImplicitAutograd_U_linalg_svd_out)); |
8724 | m.impl("linalg_svdvals" , |
8725 | TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_svdvals)); |
8726 | m.impl("linalg_svdvals.out" , |
8727 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_svdvals_out)); |
8728 | m.impl("linalg_cond" , |
8729 | TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_cond)); |
8730 | m.impl("linalg_cond.out" , |
8731 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_cond_out)); |
8732 | m.impl("linalg_cond.p_str" , |
8733 | TORCH_FN(wrapper_CompositeImplicitAutograd_p_str_linalg_cond)); |
8734 | m.impl("linalg_cond.p_str_out" , |
8735 | TORCH_FN(wrapper_CompositeImplicitAutograd_p_str_out_linalg_cond_out)); |
8736 | m.impl("linalg_pinv.atol_rtol_float" , |
8737 | TORCH_FN(wrapper_CompositeImplicitAutograd_atol_rtol_float_linalg_pinv)); |
8738 | m.impl("linalg_pinv.atol_rtol_float_out" , |
8739 | TORCH_FN(wrapper_CompositeImplicitAutograd_atol_rtol_float_out_linalg_pinv_out)); |
8740 | m.impl("linalg_pinv" , |
8741 | TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_pinv)); |
8742 | m.impl("linalg_pinv.out" , |
8743 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_pinv_out)); |
8744 | m.impl("linalg_pinv.rcond_tensor" , |
8745 | TORCH_FN(wrapper_CompositeImplicitAutograd_rcond_tensor_linalg_pinv)); |
8746 | m.impl("linalg_pinv.out_rcond_tensor" , |
8747 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_rcond_tensor_linalg_pinv_out)); |
8748 | m.impl("linalg_solve_ex" , |
8749 | TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_solve_ex)); |
8750 | m.impl("linalg_solve_ex.out" , |
8751 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_solve_ex_out)); |
8752 | m.impl("linalg_solve" , |
8753 | TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_solve)); |
8754 | m.impl("linalg_solve.out" , |
8755 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_solve_out)); |
8756 | m.impl("linalg_tensorinv" , |
8757 | TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_tensorinv)); |
8758 | m.impl("linalg_tensorinv.out" , |
8759 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_tensorinv_out)); |
8760 | m.impl("linalg_tensorsolve" , |
8761 | TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_tensorsolve)); |
8762 | m.impl("linalg_tensorsolve.out" , |
8763 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_tensorsolve_out)); |
8764 | m.impl("linalg_matrix_power" , |
8765 | TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_matrix_power)); |
8766 | m.impl("linalg_matrix_power.out" , |
8767 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_matrix_power_out)); |
8768 | m.impl("linalg_matrix_rank.atol_rtol_tensor" , |
8769 | TORCH_FN(wrapper_CompositeImplicitAutograd_atol_rtol_tensor_linalg_matrix_rank)); |
8770 | m.impl("linalg_matrix_rank.atol_rtol_tensor_out" , |
8771 | TORCH_FN(wrapper_CompositeImplicitAutograd_atol_rtol_tensor_out_linalg_matrix_rank_out)); |
8772 | m.impl("linalg_matrix_rank.atol_rtol_float" , |
8773 | TORCH_FN(wrapper_CompositeImplicitAutograd_atol_rtol_float_linalg_matrix_rank)); |
8774 | m.impl("linalg_matrix_rank.atol_rtol_float_out" , |
8775 | TORCH_FN(wrapper_CompositeImplicitAutograd_atol_rtol_float_out_linalg_matrix_rank_out)); |
8776 | m.impl("linalg_matrix_rank" , |
8777 | TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_matrix_rank)); |
8778 | m.impl("linalg_matrix_rank.out" , |
8779 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_matrix_rank_out)); |
8780 | m.impl("linalg_matrix_rank.tol_tensor" , |
8781 | TORCH_FN(wrapper_CompositeImplicitAutograd_tol_tensor_linalg_matrix_rank)); |
8782 | m.impl("linalg_matrix_rank.out_tol_tensor" , |
8783 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_tol_tensor_linalg_matrix_rank_out)); |
8784 | m.impl("linalg_multi_dot" , |
8785 | TORCH_FN(wrapper_CompositeImplicitAutograd__linalg_multi_dot)); |
8786 | m.impl("linalg_multi_dot.out" , |
8787 | TORCH_FN(wrapper_CompositeImplicitAutograd_out_linalg_multi_dot_out)); |
8788 | m.impl("nested_to_padded_tensor" , |
8789 | TORCH_FN(wrapper_CompositeImplicitAutograd__nested_to_padded_tensor)); |
8790 | m.impl("_test_serialization_subcmul" , |
8791 | TORCH_FN(wrapper_CompositeImplicitAutograd___test_serialization_subcmul)); |
8792 | m.impl("_test_string_default" , |
8793 | TORCH_FN(wrapper_CompositeImplicitAutograd___test_string_default)); |
8794 | m.impl("_test_ambiguous_defaults.a" , |
8795 | TORCH_FN(wrapper_CompositeImplicitAutograd_a__test_ambiguous_defaults)); |
8796 | m.impl("_test_ambiguous_defaults.b" , |
8797 | TORCH_FN(wrapper_CompositeImplicitAutograd_b__test_ambiguous_defaults)); |
8798 | m.impl("_test_autograd_multiple_dispatch.ntonly" , |
8799 | TORCH_FN(wrapper_CompositeImplicitAutograd_ntonly__test_autograd_multiple_dispatch)); |
8800 | m.impl("pad_sequence" , |
8801 | TORCH_FN(wrapper_CompositeImplicitAutograd__pad_sequence)); |
8802 | m.impl("flatten_dense_tensors" , |
8803 | TORCH_FN(wrapper_CompositeImplicitAutograd__flatten_dense_tensors)); |
8804 | m.impl("unflatten_dense_tensors" , |
8805 | TORCH_FN(wrapper_CompositeImplicitAutograd__unflatten_dense_tensors)); |
8806 | m.impl("scaled_dot_product_attention" , |
8807 | TORCH_FN(wrapper_CompositeImplicitAutograd__scaled_dot_product_attention)); |
8808 | m.impl("_scaled_dot_product_attention" , |
8809 | TORCH_FN(wrapper_CompositeImplicitAutograd___scaled_dot_product_attention)); |
8810 | m.impl("_scaled_dot_product_attention_math" , |
8811 | TORCH_FN(wrapper_CompositeImplicitAutograd___scaled_dot_product_attention_math)); |
8812 | m.impl("special_chebyshev_polynomial_t.x_scalar" , |
8813 | TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_special_chebyshev_polynomial_t)); |
8814 | m.impl("special_chebyshev_polynomial_t.x_scalar_out" , |
8815 | TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_t_out)); |
8816 | m.impl("special_chebyshev_polynomial_t.n_scalar" , |
8817 | TORCH_FN(wrapper_CompositeImplicitAutograd_n_scalar_special_chebyshev_polynomial_t)); |
8818 | m.impl("special_chebyshev_polynomial_u.x_scalar" , |
8819 | TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_special_chebyshev_polynomial_u)); |
8820 | m.impl("special_chebyshev_polynomial_u.x_scalar_out" , |
8821 | TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_u_out)); |
8822 | m.impl("special_chebyshev_polynomial_u.n_scalar" , |
8823 | TORCH_FN(wrapper_CompositeImplicitAutograd_n_scalar_special_chebyshev_polynomial_u)); |
8824 | m.impl("special_chebyshev_polynomial_v.x_scalar" , |
8825 | TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_special_chebyshev_polynomial_v)); |
8826 | m.impl("special_chebyshev_polynomial_v.x_scalar_out" , |
8827 | TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_v_out)); |
8828 | m.impl("special_chebyshev_polynomial_v.n_scalar" , |
8829 | TORCH_FN(wrapper_CompositeImplicitAutograd_n_scalar_special_chebyshev_polynomial_v)); |
8830 | m.impl("special_chebyshev_polynomial_w.x_scalar" , |
8831 | TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_special_chebyshev_polynomial_w)); |
8832 | m.impl("special_chebyshev_polynomial_w.x_scalar_out" , |
8833 | TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_w_out)); |
8834 | m.impl("special_chebyshev_polynomial_w.n_scalar" , |
8835 | TORCH_FN(wrapper_CompositeImplicitAutograd_n_scalar_special_chebyshev_polynomial_w)); |
8836 | m.impl("special_hermite_polynomial_h.x_scalar" , |
8837 | TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_special_hermite_polynomial_h)); |
8838 | m.impl("special_hermite_polynomial_h.x_scalar_out" , |
8839 | TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_out_special_hermite_polynomial_h_out)); |
8840 | m.impl("special_hermite_polynomial_h.n_scalar" , |
8841 | TORCH_FN(wrapper_CompositeImplicitAutograd_n_scalar_special_hermite_polynomial_h)); |
8842 | m.impl("special_hermite_polynomial_he.x_scalar" , |
8843 | TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_special_hermite_polynomial_he)); |
8844 | m.impl("special_hermite_polynomial_he.x_scalar_out" , |
8845 | TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_out_special_hermite_polynomial_he_out)); |
8846 | m.impl("special_hermite_polynomial_he.n_scalar" , |
8847 | TORCH_FN(wrapper_CompositeImplicitAutograd_n_scalar_special_hermite_polynomial_he)); |
8848 | m.impl("special_laguerre_polynomial_l.x_scalar" , |
8849 | TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_special_laguerre_polynomial_l)); |
8850 | m.impl("special_laguerre_polynomial_l.x_scalar_out" , |
8851 | TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_out_special_laguerre_polynomial_l_out)); |
8852 | m.impl("special_laguerre_polynomial_l.n_scalar" , |
8853 | TORCH_FN(wrapper_CompositeImplicitAutograd_n_scalar_special_laguerre_polynomial_l)); |
8854 | m.impl("special_legendre_polynomial_p.x_scalar" , |
8855 | TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_special_legendre_polynomial_p)); |
8856 | m.impl("special_legendre_polynomial_p.x_scalar_out" , |
8857 | TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_out_special_legendre_polynomial_p_out)); |
8858 | m.impl("special_legendre_polynomial_p.n_scalar" , |
8859 | TORCH_FN(wrapper_CompositeImplicitAutograd_n_scalar_special_legendre_polynomial_p)); |
8860 | m.impl("special_shifted_chebyshev_polynomial_t.x_scalar" , |
8861 | TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_special_shifted_chebyshev_polynomial_t)); |
8862 | m.impl("special_shifted_chebyshev_polynomial_t.x_scalar_out" , |
8863 | TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_t_out)); |
8864 | m.impl("special_shifted_chebyshev_polynomial_t.n_scalar" , |
8865 | TORCH_FN(wrapper_CompositeImplicitAutograd_n_scalar_special_shifted_chebyshev_polynomial_t)); |
8866 | m.impl("special_shifted_chebyshev_polynomial_u.x_scalar" , |
8867 | TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_special_shifted_chebyshev_polynomial_u)); |
8868 | m.impl("special_shifted_chebyshev_polynomial_u.x_scalar_out" , |
8869 | TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_u_out)); |
8870 | m.impl("special_shifted_chebyshev_polynomial_u.n_scalar" , |
8871 | TORCH_FN(wrapper_CompositeImplicitAutograd_n_scalar_special_shifted_chebyshev_polynomial_u)); |
8872 | m.impl("special_shifted_chebyshev_polynomial_v.x_scalar" , |
8873 | TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_special_shifted_chebyshev_polynomial_v)); |
8874 | m.impl("special_shifted_chebyshev_polynomial_v.x_scalar_out" , |
8875 | TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_v_out)); |
8876 | m.impl("special_shifted_chebyshev_polynomial_v.n_scalar" , |
8877 | TORCH_FN(wrapper_CompositeImplicitAutograd_n_scalar_special_shifted_chebyshev_polynomial_v)); |
8878 | m.impl("special_shifted_chebyshev_polynomial_w.x_scalar" , |
8879 | TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_special_shifted_chebyshev_polynomial_w)); |
8880 | m.impl("special_shifted_chebyshev_polynomial_w.x_scalar_out" , |
8881 | TORCH_FN(wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_w_out)); |
8882 | m.impl("special_shifted_chebyshev_polynomial_w.n_scalar" , |
8883 | TORCH_FN(wrapper_CompositeImplicitAutograd_n_scalar_special_shifted_chebyshev_polynomial_w)); |
8884 | }; |
8885 | } // anonymous namespace |
8886 | namespace compositeimplicitautograd { |
8887 | at::Tensor _cast_Byte(const at::Tensor & self, bool non_blocking) { |
8888 | return wrapper_CompositeImplicitAutograd___cast_Byte(self, non_blocking); |
8889 | } |
8890 | at::Tensor _cast_Char(const at::Tensor & self, bool non_blocking) { |
8891 | return wrapper_CompositeImplicitAutograd___cast_Char(self, non_blocking); |
8892 | } |
8893 | at::Tensor _cast_Double(const at::Tensor & self, bool non_blocking) { |
8894 | return wrapper_CompositeImplicitAutograd___cast_Double(self, non_blocking); |
8895 | } |
8896 | at::Tensor _cast_Float(const at::Tensor & self, bool non_blocking) { |
8897 | return wrapper_CompositeImplicitAutograd___cast_Float(self, non_blocking); |
8898 | } |
8899 | at::Tensor _cast_Int(const at::Tensor & self, bool non_blocking) { |
8900 | return wrapper_CompositeImplicitAutograd___cast_Int(self, non_blocking); |
8901 | } |
8902 | at::Tensor _cast_Long(const at::Tensor & self, bool non_blocking) { |
8903 | return wrapper_CompositeImplicitAutograd___cast_Long(self, non_blocking); |
8904 | } |
8905 | at::Tensor _cast_Short(const at::Tensor & self, bool non_blocking) { |
8906 | return wrapper_CompositeImplicitAutograd___cast_Short(self, non_blocking); |
8907 | } |
8908 | at::Tensor _cast_Half(const at::Tensor & self, bool non_blocking) { |
8909 | return wrapper_CompositeImplicitAutograd___cast_Half(self, non_blocking); |
8910 | } |
8911 | void _backward(const at::Tensor & self, at::TensorList inputs, const c10::optional<at::Tensor> & gradient, c10::optional<bool> retain_graph, bool create_graph) { |
8912 | return wrapper_CompositeImplicitAutograd___backward(self, inputs, gradient, retain_graph, create_graph); |
8913 | } |
8914 | void set_data(at::Tensor & self, const at::Tensor & new_data) { |
8915 | return wrapper_CompositeImplicitAutograd__set_data(self, new_data); |
8916 | } |
8917 | at::Tensor data(const at::Tensor & self) { |
8918 | return wrapper_CompositeImplicitAutograd__data(self); |
8919 | } |
8920 | bool is_leaf(const at::Tensor & self) { |
8921 | return wrapper_CompositeImplicitAutograd__is_leaf(self); |
8922 | } |
8923 | int64_t output_nr(const at::Tensor & self) { |
8924 | return wrapper_CompositeImplicitAutograd__output_nr(self); |
8925 | } |
8926 | int64_t _version(const at::Tensor & self) { |
8927 | return wrapper_CompositeImplicitAutograd___version(self); |
8928 | } |
8929 | at::Tensor & requires_grad_(at::Tensor & self, bool requires_grad) { |
8930 | return wrapper_CompositeImplicitAutograd__requires_grad_(self, requires_grad); |
8931 | } |
8932 | void retain_grad(at::Tensor & self) { |
8933 | return wrapper_CompositeImplicitAutograd__retain_grad(self); |
8934 | } |
8935 | bool retains_grad(const at::Tensor & self) { |
8936 | return wrapper_CompositeImplicitAutograd__retains_grad(self); |
8937 | } |
8938 | ::std::tuple<at::Tensor,at::Tensor> _unpack_dual(const at::Tensor & dual, int64_t level) { |
8939 | return wrapper_CompositeImplicitAutograd___unpack_dual(dual, level); |
8940 | } |
8941 | at::Tensor & rename_(at::Tensor & self, c10::optional<at::DimnameList> names) { |
8942 | return wrapper_CompositeImplicitAutograd__rename_(self, names); |
8943 | } |
8944 | at::Tensor rename(const at::Tensor & self, c10::optional<at::DimnameList> names) { |
8945 | return wrapper_CompositeImplicitAutograd__rename(self, names); |
8946 | } |
8947 | at::Tensor align_to(const at::Tensor & self, at::DimnameList names) { |
8948 | return wrapper_CompositeImplicitAutograd__align_to(self, names); |
8949 | } |
8950 | at::Tensor align_to(const at::Tensor & self, at::DimnameList order, int64_t ellipsis_idx) { |
8951 | return wrapper_CompositeImplicitAutograd_ellipsis_idx_align_to(self, order, ellipsis_idx); |
8952 | } |
8953 | at::Tensor align_as(const at::Tensor & self, const at::Tensor & other) { |
8954 | return wrapper_CompositeImplicitAutograd__align_as(self, other); |
8955 | } |
// Code-generated public-API shims (see "@generated by torchgen/gen.py" above):
// each function below forwards its arguments unchanged to the file-local
// wrapper_CompositeImplicitAutograd_* implementation generated for the
// corresponding operator. Do not hand-edit the code; it is regenerated.
::std::vector<at::Tensor> align_tensors(at::TensorList tensors) {
    return wrapper_CompositeImplicitAutograd__align_tensors(tensors);
}
// Debug/assertion helper: returns void, forwards metadata checks to the wrapper.
void _assert_tensor_metadata(const at::Tensor & a, at::OptionalIntArrayRef size, at::OptionalIntArrayRef stride, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd___assert_tensor_metadata(a, size, stride, dtype);
}
at::Tensor refine_names(const at::Tensor & self, at::DimnameList names) {
    return wrapper_CompositeImplicitAutograd__refine_names(self, names);
}
bool _use_cudnn_rnn_flatten_weight() {
    return wrapper_CompositeImplicitAutograd___use_cudnn_rnn_flatten_weight();
}
int64_t _debug_has_internal_overlap(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd___debug_has_internal_overlap(self);
}
// Sobol quasi-random engine helpers. The in-place variants (trailing '_')
// take `self` by non-const reference and return it, per ATen convention.
::std::tuple<at::Tensor,at::Tensor> _sobol_engine_draw(const at::Tensor & quasi, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd___sobol_engine_draw(quasi, n, sobolstate, dimension, num_generated, dtype);
}
at::Tensor & _sobol_engine_ff_(at::Tensor & self, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated) {
    return wrapper_CompositeImplicitAutograd___sobol_engine_ff_(self, n, sobolstate, dimension, num_generated);
}
at::Tensor & _sobol_engine_scramble_(at::Tensor & self, const at::Tensor & ltm, int64_t dimension) {
    return wrapper_CompositeImplicitAutograd___sobol_engine_scramble_(self, ltm, dimension);
}
at::Tensor & _sobol_engine_initialize_state_(at::Tensor & self, int64_t dimension) {
    return wrapper_CompositeImplicitAutograd___sobol_engine_initialize_state_(self, dimension);
}
// Generated shims: reshape helpers plus the dropout family. Each forwards
// unchanged to its wrapper_CompositeImplicitAutograd_* implementation.
at::Tensor _reshape_from_tensor(const at::Tensor & self, const at::Tensor & shape) {
    return wrapper_CompositeImplicitAutograd___reshape_from_tensor(self, shape);
}
at::Tensor _shape_as_tensor(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd___shape_as_tensor(self);
}
// Dropout variants: each op comes as an out-of-place overload (const input,
// returns a new Tensor) and an in-place overload (trailing '_', mutates and
// returns `self`).
at::Tensor dropout(const at::Tensor & input, double p, bool train) {
    return wrapper_CompositeImplicitAutograd__dropout(input, p, train);
}
at::Tensor & dropout_(at::Tensor & self, double p, bool train) {
    return wrapper_CompositeImplicitAutograd__dropout_(self, p, train);
}
at::Tensor feature_dropout(const at::Tensor & input, double p, bool train) {
    return wrapper_CompositeImplicitAutograd__feature_dropout(input, p, train);
}
at::Tensor & feature_dropout_(at::Tensor & self, double p, bool train) {
    return wrapper_CompositeImplicitAutograd__feature_dropout_(self, p, train);
}
at::Tensor alpha_dropout(const at::Tensor & input, double p, bool train) {
    return wrapper_CompositeImplicitAutograd__alpha_dropout(input, p, train);
}
at::Tensor & alpha_dropout_(at::Tensor & self, double p, bool train) {
    return wrapper_CompositeImplicitAutograd__alpha_dropout_(self, p, train);
}
at::Tensor feature_alpha_dropout(const at::Tensor & input, double p, bool train) {
    return wrapper_CompositeImplicitAutograd__feature_alpha_dropout(input, p, train);
}
at::Tensor & feature_alpha_dropout_(at::Tensor & self, double p, bool train) {
    return wrapper_CompositeImplicitAutograd__feature_alpha_dropout_(self, p, train);
}
// Generated shims for absolute / complex-view helpers / arccos. Note the
// *_out (out parameter first) and *_outf (out parameter last) overload pairs:
// both call the same wrapper with identical argument order — they differ only
// in where `out` sits in the public signature.
at::Tensor absolute(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__absolute(self);
}
at::Tensor & absolute_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd_out_absolute_out(self, out);
}
at::Tensor & absolute_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_absolute_out(self, out);
}
at::Tensor & absolute_(at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__absolute_(self);
}
at::Tensor chalf(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
    return wrapper_CompositeImplicitAutograd__chalf(self, memory_format);
}
at::Tensor real(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__real(self);
}
at::Tensor imag(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__imag(self);
}
at::Tensor conj(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__conj(self);
}
at::Tensor conj_physical(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__conj_physical(self);
}
at::Tensor resolve_conj(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__resolve_conj(self);
}
at::Tensor resolve_neg(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__resolve_neg(self);
}
at::Tensor arccos(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__arccos(self);
}
at::Tensor & arccos_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd_out_arccos_out(self, out);
}
at::Tensor & arccos_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_arccos_out(self, out);
}
at::Tensor & arccos_(at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__arccos_(self);
}
// Generated shims: 1-d pooling ops and the Dimname overloads of all/any.
// The wrapper-name infix (e.g. "dimname", "dimname_out") mirrors the
// operator-overload name from native_functions.yaml.
at::Tensor avg_pool1d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad) {
    return wrapper_CompositeImplicitAutograd__avg_pool1d(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
}
at::Tensor adaptive_avg_pool1d(const at::Tensor & self, at::IntArrayRef output_size) {
    return wrapper_CompositeImplicitAutograd__adaptive_avg_pool1d(self, output_size);
}
// Returns (values, indices).
::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool1d(const at::Tensor & self, at::IntArrayRef output_size) {
    return wrapper_CompositeImplicitAutograd__adaptive_max_pool1d(self, output_size);
}
at::Tensor affine_grid_generator_backward(const at::Tensor & grad, at::IntArrayRef size, bool align_corners) {
    return wrapper_CompositeImplicitAutograd__affine_grid_generator_backward(grad, size, align_corners);
}
at::Tensor _test_check_tensor(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd___test_check_tensor(self);
}
at::Tensor all(const at::Tensor & self, at::Dimname dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_dimname_all(self, dim, keepdim);
}
// *_out / *_outf pairs delegate to one wrapper; they differ only in the
// position of `out` in the public signature.
at::Tensor & all_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_dimname_out_all_out(self, dim, keepdim, out);
}
at::Tensor & all_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_dimname_out_all_out(self, dim, keepdim, out);
}
at::Tensor any(const at::Tensor & self, at::Dimname dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_dimname_any(self, dim, keepdim);
}
at::Tensor & any_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_dimname_out_any_out(self, dim, keepdim, out);
}
at::Tensor & any_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_dimname_out_any_out(self, dim, keepdim, out);
}
// Generated shims: _dim_arange plus the "arc*" alias families (arccosh,
// arcsinh, arctanh, arcsin, arctan). Each family has the standard four
// overloads: functional, out-first (*_out), out-last (*_outf), and in-place
// ('_' suffix); the two out overloads call the same wrapper.
at::Tensor _dim_arange(const at::Tensor & like, int64_t dim) {
    return wrapper_CompositeImplicitAutograd___dim_arange(like, dim);
}
at::Tensor arccosh(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__arccosh(self);
}
at::Tensor & arccosh_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd_out_arccosh_out(self, out);
}
at::Tensor & arccosh_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_arccosh_out(self, out);
}
at::Tensor & arccosh_(at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__arccosh_(self);
}
at::Tensor arcsinh(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__arcsinh(self);
}
at::Tensor & arcsinh_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd_out_arcsinh_out(self, out);
}
at::Tensor & arcsinh_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_arcsinh_out(self, out);
}
at::Tensor & arcsinh_(at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__arcsinh_(self);
}
at::Tensor arctanh(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__arctanh(self);
}
at::Tensor & arctanh_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd_out_arctanh_out(self, out);
}
at::Tensor & arctanh_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_arctanh_out(self, out);
}
at::Tensor & arctanh_(at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__arctanh_(self);
}
at::Tensor arcsin(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__arcsin(self);
}
at::Tensor & arcsin_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd_out_arcsin_out(self, out);
}
at::Tensor & arcsin_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_arcsin_out(self, out);
}
at::Tensor & arcsin_(at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__arcsin_(self);
}
at::Tensor arctan(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__arctan(self);
}
at::Tensor & arctan_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd_out_arctan_out(self, out);
}
at::Tensor & arctan_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_arctan_out(self, out);
}
at::Tensor & arctan_(at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__arctan_(self);
}
// Generated shims: atleast_{1,2,3}d. Each has a single-Tensor overload and a
// TensorList overload ("Sequence" wrapper variant) that returns one result
// per input tensor.
at::Tensor atleast_1d(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__atleast_1d(self);
}
::std::vector<at::Tensor> atleast_1d(at::TensorList tensors) {
    return wrapper_CompositeImplicitAutograd_Sequence_atleast_1d(tensors);
}
at::Tensor atleast_2d(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__atleast_2d(self);
}
::std::vector<at::Tensor> atleast_2d(at::TensorList tensors) {
    return wrapper_CompositeImplicitAutograd_Sequence_atleast_2d(tensors);
}
at::Tensor atleast_3d(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__atleast_3d(self);
}
::std::vector<at::Tensor> atleast_3d(at::TensorList tensors) {
    return wrapper_CompositeImplicitAutograd_Sequence_atleast_3d(tensors);
}
// Generated shims: batch-norm family, bilinear, broadcasting helpers, and the
// Dimname overloads of cat.
at::Tensor batch_norm(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled) {
    return wrapper_CompositeImplicitAutograd__batch_norm(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled);
}
// Returns (output, save_mean, save_var, reserve, impl_index); the trailing
// int64_t identifies which backend implementation was selected.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,int64_t> _batch_norm_impl_index(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled) {
    return wrapper_CompositeImplicitAutograd___batch_norm_impl_index(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _batch_norm_impl_index_backward(int64_t impl_index, const at::Tensor & input, const at::Tensor & grad_output, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var_transform, bool train, double eps, ::std::array<bool,3> output_mask, const at::Tensor & reservedSpace) {
    return wrapper_CompositeImplicitAutograd___batch_norm_impl_index_backward(impl_index, input, grad_output, weight, running_mean, running_var, save_mean, save_var_transform, train, eps, output_mask, reservedSpace);
}
at::Tensor bilinear(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const c10::optional<at::Tensor> & bias) {
    return wrapper_CompositeImplicitAutograd__bilinear(input1, input2, weight, bias);
}
::std::vector<at::Tensor> broadcast_tensors(at::TensorList tensors) {
    return wrapper_CompositeImplicitAutograd__broadcast_tensors(tensors);
}
// The plain-int overload converts sizes to symbolic ints (fromIntArrayRefSlow)
// before calling the same wrapper the *_symint overload calls directly.
at::Tensor broadcast_to(const at::Tensor & self, at::IntArrayRef size) {
    return wrapper_CompositeImplicitAutograd__broadcast_to(self, c10::fromIntArrayRefSlow(size));
}
at::Tensor broadcast_to_symint(const at::Tensor & self, c10::SymIntArrayRef size) {
    return wrapper_CompositeImplicitAutograd__broadcast_to(self, size);
}
at::Tensor cat(at::TensorList tensors, at::Dimname dim) {
    return wrapper_CompositeImplicitAutograd_names_cat(tensors, dim);
}
at::Tensor & cat_out(at::Tensor & out, at::TensorList tensors, at::Dimname dim) {
    return wrapper_CompositeImplicitAutograd_names_out_cat_out(tensors, dim, out);
}
at::Tensor & cat_outf(at::TensorList tensors, at::Dimname dim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_names_out_cat_out(tensors, dim, out);
}
// Generated shims: concat / concatenate (each with int-dim and Dimname-dim
// overload sets) and chain_matmul. As elsewhere, *_out and *_outf delegate to
// one wrapper and differ only in the position of `out`.
at::Tensor concat(at::TensorList tensors, int64_t dim) {
    return wrapper_CompositeImplicitAutograd__concat(tensors, dim);
}
at::Tensor & concat_out(at::Tensor & out, at::TensorList tensors, int64_t dim) {
    return wrapper_CompositeImplicitAutograd_out_concat_out(tensors, dim, out);
}
at::Tensor & concat_outf(at::TensorList tensors, int64_t dim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_concat_out(tensors, dim, out);
}
at::Tensor concat(at::TensorList tensors, at::Dimname dim) {
    return wrapper_CompositeImplicitAutograd_names_concat(tensors, dim);
}
at::Tensor & concat_out(at::Tensor & out, at::TensorList tensors, at::Dimname dim) {
    return wrapper_CompositeImplicitAutograd_names_out_concat_out(tensors, dim, out);
}
at::Tensor & concat_outf(at::TensorList tensors, at::Dimname dim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_names_out_concat_out(tensors, dim, out);
}
at::Tensor concatenate(at::TensorList tensors, int64_t dim) {
    return wrapper_CompositeImplicitAutograd__concatenate(tensors, dim);
}
at::Tensor & concatenate_out(at::Tensor & out, at::TensorList tensors, int64_t dim) {
    return wrapper_CompositeImplicitAutograd_out_concatenate_out(tensors, dim, out);
}
at::Tensor & concatenate_outf(at::TensorList tensors, int64_t dim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_concatenate_out(tensors, dim, out);
}
at::Tensor concatenate(at::TensorList tensors, at::Dimname dim) {
    return wrapper_CompositeImplicitAutograd_names_concatenate(tensors, dim);
}
at::Tensor & concatenate_out(at::Tensor & out, at::TensorList tensors, at::Dimname dim) {
    return wrapper_CompositeImplicitAutograd_names_out_concatenate_out(tensors, dim, out);
}
at::Tensor & concatenate_outf(at::TensorList tensors, at::Dimname dim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_names_out_concatenate_out(tensors, dim, out);
}
at::Tensor chain_matmul(at::TensorList matrices) {
    return wrapper_CompositeImplicitAutograd__chain_matmul(matrices);
}
at::Tensor & chain_matmul_out(at::Tensor & out, at::TensorList matrices) {
    return wrapper_CompositeImplicitAutograd_out_chain_matmul_out(matrices, out);
}
at::Tensor & chain_matmul_outf(at::TensorList matrices, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_chain_matmul_out(matrices, out);
}
// Generated shims: chunk and the tensor_split overload set (by section count,
// by explicit indices, or by a tensor of indices/sections). The plain-int
// overloads share a wrapper with their *_symint counterparts; for the indices
// overload the IntArrayRef is widened via fromIntArrayRefSlow first.
::std::vector<at::Tensor> unsafe_chunk(const at::Tensor & self, int64_t chunks, int64_t dim) {
    return wrapper_CompositeImplicitAutograd__unsafe_chunk(self, chunks, dim);
}
::std::vector<at::Tensor> chunk(const at::Tensor & self, int64_t chunks, int64_t dim) {
    return wrapper_CompositeImplicitAutograd__chunk(self, chunks, dim);
}
::std::vector<at::Tensor> tensor_split(const at::Tensor & self, int64_t sections, int64_t dim) {
    return wrapper_CompositeImplicitAutograd_sections_tensor_split(self, sections, dim);
}
::std::vector<at::Tensor> tensor_split_symint(const at::Tensor & self, c10::SymInt sections, int64_t dim) {
    return wrapper_CompositeImplicitAutograd_sections_tensor_split(self, sections, dim);
}
::std::vector<at::Tensor> tensor_split(const at::Tensor & self, at::IntArrayRef indices, int64_t dim) {
    return wrapper_CompositeImplicitAutograd_indices_tensor_split(self, c10::fromIntArrayRefSlow(indices), dim);
}
::std::vector<at::Tensor> tensor_split_symint(const at::Tensor & self, c10::SymIntArrayRef indices, int64_t dim) {
    return wrapper_CompositeImplicitAutograd_indices_tensor_split(self, indices, dim);
}
::std::vector<at::Tensor> tensor_split(const at::Tensor & self, const at::Tensor & tensor_indices_or_sections, int64_t dim) {
    return wrapper_CompositeImplicitAutograd_tensor_indices_or_sections_tensor_split(self, tensor_indices_or_sections, dim);
}
// Generated shims: clip with Scalar bounds and with Tensor bounds (the
// "Tensor" wrapper variant), each offering functional / out-first / out-last /
// in-place overloads; plus cudnn_is_acceptable and contiguous.
at::Tensor clip(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
    return wrapper_CompositeImplicitAutograd__clip(self, min, max);
}
at::Tensor & clip_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
    return wrapper_CompositeImplicitAutograd_out_clip_out(self, min, max, out);
}
at::Tensor & clip_outf(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_clip_out(self, min, max, out);
}
at::Tensor & clip_(at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
    return wrapper_CompositeImplicitAutograd__clip_(self, min, max);
}
at::Tensor clip(const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
    return wrapper_CompositeImplicitAutograd_Tensor_clip(self, min, max);
}
at::Tensor & clip_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
    return wrapper_CompositeImplicitAutograd_Tensor_out_clip_out(self, min, max, out);
}
at::Tensor & clip_outf(const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_Tensor_out_clip_out(self, min, max, out);
}
at::Tensor & clip_(at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
    return wrapper_CompositeImplicitAutograd_Tensor_clip_(self, min, max);
}
bool cudnn_is_acceptable(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__cudnn_is_acceptable(self);
}
at::Tensor contiguous(const at::Tensor & self, at::MemoryFormat memory_format) {
    return wrapper_CompositeImplicitAutograd__contiguous(self, memory_format);
}
// Generated shims: convolution family. convN_d comes in an IntArrayRef-padding
// overload and a string-padding overload (the "padding" wrapper variant,
// e.g. padding="same"/"valid" — semantics live in the wrapper).
at::Tensor _convolution(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled) {
    return wrapper_CompositeImplicitAutograd_deprecated__convolution(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled);
}
at::Tensor _convolution_mode(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
    return wrapper_CompositeImplicitAutograd___convolution_mode(input, weight, bias, stride, padding, dilation, groups);
}
// The int overload widens padding/output_padding to SymInt via
// fromIntArrayRefSlow; the *_symint overload passes them through unchanged.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _convolution_double_backward(const c10::optional<at::Tensor> & ggI, const c10::optional<at::Tensor> & ggW, const c10::optional<at::Tensor> & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
    return wrapper_CompositeImplicitAutograd___convolution_double_backward(ggI, ggW, ggb, gO, weight, self, stride, c10::fromIntArrayRefSlow(padding), dilation, transposed, c10::fromIntArrayRefSlow(output_padding), groups, output_mask);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _convolution_double_backward_symint(const c10::optional<at::Tensor> & ggI, const c10::optional<at::Tensor> & ggW, const c10::optional<at::Tensor> & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
    return wrapper_CompositeImplicitAutograd___convolution_double_backward(ggI, ggW, ggb, gO, weight, self, stride, padding, dilation, transposed, output_padding, groups, output_mask);
}
at::Tensor conv1d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
    return wrapper_CompositeImplicitAutograd__conv1d(input, weight, bias, stride, padding, dilation, groups);
}
at::Tensor conv2d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
    return wrapper_CompositeImplicitAutograd__conv2d(input, weight, bias, stride, padding, dilation, groups);
}
at::Tensor conv3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
    return wrapper_CompositeImplicitAutograd__conv3d(input, weight, bias, stride, padding, dilation, groups);
}
at::Tensor conv1d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
    return wrapper_CompositeImplicitAutograd_padding_conv1d(input, weight, bias, stride, padding, dilation, groups);
}
at::Tensor conv2d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
    return wrapper_CompositeImplicitAutograd_padding_conv2d(input, weight, bias, stride, padding, dilation, groups);
}
at::Tensor conv3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
    return wrapper_CompositeImplicitAutograd_padding_conv3d(input, weight, bias, stride, padding, dilation, groups);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> conv_tbc_backward(const at::Tensor & self, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) {
    return wrapper_CompositeImplicitAutograd__conv_tbc_backward(self, input, weight, bias, pad);
}
at::Tensor conv_transpose1d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation) {
    return wrapper_CompositeImplicitAutograd__conv_transpose1d(input, weight, bias, stride, padding, output_padding, groups, dilation);
}
at::Tensor conv_transpose2d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation) {
    return wrapper_CompositeImplicitAutograd_input_conv_transpose2d(input, weight, bias, stride, padding, output_padding, groups, dilation);
}
at::Tensor conv_transpose3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation) {
    return wrapper_CompositeImplicitAutograd_input_conv_transpose3d(input, weight, bias, stride, padding, output_padding, groups, dilation);
}
at::Tensor cosine_embedding_loss(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) {
    return wrapper_CompositeImplicitAutograd__cosine_embedding_loss(input1, input2, target, margin, reduction);
}
// Generated shims: covariance/correlation helpers and the Dimname overloads
// of cummax/cummin (returning (values, indices); out overloads take both
// result tensors by reference).
at::Tensor cov(const at::Tensor & self, int64_t correction, const c10::optional<at::Tensor> & fweights, const c10::optional<at::Tensor> & aweights) {
    return wrapper_CompositeImplicitAutograd__cov(self, correction, fweights, aweights);
}
at::Tensor corrcoef(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__corrcoef(self);
}
::std::tuple<at::Tensor,at::Tensor> cummax(const at::Tensor & self, at::Dimname dim) {
    return wrapper_CompositeImplicitAutograd_dimname_cummax(self, dim);
}
::std::tuple<at::Tensor &,at::Tensor &> cummax_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim) {
    return wrapper_CompositeImplicitAutograd_dimname_out_cummax_out(self, dim, values, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> cummax_outf(const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices) {
    return wrapper_CompositeImplicitAutograd_dimname_out_cummax_out(self, dim, values, indices);
}
::std::tuple<at::Tensor,at::Tensor> cummin(const at::Tensor & self, at::Dimname dim) {
    return wrapper_CompositeImplicitAutograd_dimname_cummin(self, dim);
}
::std::tuple<at::Tensor &,at::Tensor &> cummin_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim) {
    return wrapper_CompositeImplicitAutograd_dimname_out_cummin_out(self, dim, values, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> cummin_outf(const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices) {
    return wrapper_CompositeImplicitAutograd_dimname_out_cummin_out(self, dim, values, indices);
}
at::Tensor cummaxmin_backward(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & indices, int64_t dim) {
    return wrapper_CompositeImplicitAutograd__cummaxmin_backward(grad, input, indices, dim);
}
// Generated shims: Dimname overloads of cumprod/cumsum, cumulative_trapezoid
// (sample-point vs. scalar-spacing overloads), and ctc_loss (length lists as
// IntArrayRef vs. Tensor).
at::Tensor cumprod(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_dimname_cumprod(self, dim, dtype);
}
at::Tensor & cumprod_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_dimname_out_cumprod_out(self, dim, dtype, out);
}
at::Tensor & cumprod_outf(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_dimname_out_cumprod_out(self, dim, dtype, out);
}
at::Tensor & cumprod_(at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_dimname_cumprod_(self, dim, dtype);
}
at::Tensor cumprod_backward(const at::Tensor & grad, const at::Tensor & input, int64_t dim, const at::Tensor & output) {
    return wrapper_CompositeImplicitAutograd__cumprod_backward(grad, input, dim, output);
}
at::Tensor cumsum(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_dimname_cumsum(self, dim, dtype);
}
at::Tensor & cumsum_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_dimname_out_cumsum_out(self, dim, dtype, out);
}
at::Tensor & cumsum_outf(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_dimname_out_cumsum_out(self, dim, dtype, out);
}
at::Tensor & cumsum_(at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_dimname_cumsum_(self, dim, dtype);
}
at::Tensor cumulative_trapezoid(const at::Tensor & y, const at::Tensor & x, int64_t dim) {
    return wrapper_CompositeImplicitAutograd_x_cumulative_trapezoid(y, x, dim);
}
at::Tensor cumulative_trapezoid(const at::Tensor & y, const at::Scalar & dx, int64_t dim) {
    return wrapper_CompositeImplicitAutograd_dx_cumulative_trapezoid(y, dx, dim);
}
at::Tensor ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) {
    return wrapper_CompositeImplicitAutograd_IntList_ctc_loss(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
}
at::Tensor ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) {
    return wrapper_CompositeImplicitAutograd_Tensor_ctc_loss(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
}
// Generated shims: diagonal family and diff. fill_diagonal_ is in-place
// (mutates and returns `self`).
at::Tensor diagflat(const at::Tensor & self, int64_t offset) {
    return wrapper_CompositeImplicitAutograd__diagflat(self, offset);
}
at::Tensor linalg_diagonal(const at::Tensor & A, int64_t offset, int64_t dim1, int64_t dim2) {
    return wrapper_CompositeImplicitAutograd__linalg_diagonal(A, offset, dim1, dim2);
}
at::Tensor diagonal(const at::Tensor & self, at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset) {
    return wrapper_CompositeImplicitAutograd_Dimname_diagonal(self, outdim, dim1, dim2, offset);
}
at::Tensor & fill_diagonal_(at::Tensor & self, const at::Scalar & fill_value, bool wrap) {
    return wrapper_CompositeImplicitAutograd__fill_diagonal_(self, fill_value, wrap);
}
at::Tensor diff(const at::Tensor & self, int64_t n, int64_t dim, const c10::optional<at::Tensor> & prepend, const c10::optional<at::Tensor> & append) {
    return wrapper_CompositeImplicitAutograd__diff(self, n, dim, prepend, append);
}
at::Tensor & diff_out(at::Tensor & out, const at::Tensor & self, int64_t n, int64_t dim, const c10::optional<at::Tensor> & prepend, const c10::optional<at::Tensor> & append) {
    return wrapper_CompositeImplicitAutograd_out_diff_out(self, n, dim, prepend, append, out);
}
at::Tensor & diff_outf(const at::Tensor & self, int64_t n, int64_t dim, const c10::optional<at::Tensor> & prepend, const c10::optional<at::Tensor> & append, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_diff_out(self, n, dim, prepend, append, out);
}
// gradient: overload set distinguished by how `spacing` is supplied
// (optional scalar, scalar, scalar list, tensor list, or omitted) and how
// `dim` is supplied (optional int vs. IntArrayRef). Each overload forwards
// to its dedicated generated wrapper.
::std::vector<at::Tensor> gradient(const at::Tensor & self, const c10::optional<at::Scalar> & spacing, c10::optional<int64_t> dim, int64_t edge_order) {
    return wrapper_CompositeImplicitAutograd_scalarint_gradient(self, spacing, dim, edge_order);
}
::std::vector<at::Tensor> gradient(const at::Tensor & self, const at::Scalar & spacing, at::IntArrayRef dim, int64_t edge_order) {
    return wrapper_CompositeImplicitAutograd_scalararray_gradient(self, spacing, dim, edge_order);
}
// No spacing argument: dims only.
::std::vector<at::Tensor> gradient(const at::Tensor & self, at::IntArrayRef dim, int64_t edge_order) {
    return wrapper_CompositeImplicitAutograd_array_gradient(self, dim, edge_order);
}
::std::vector<at::Tensor> gradient(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, c10::optional<int64_t> dim, int64_t edge_order) {
    return wrapper_CompositeImplicitAutograd_scalarrayint_gradient(self, spacing, dim, edge_order);
}
::std::vector<at::Tensor> gradient(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, at::IntArrayRef dim, int64_t edge_order) {
    return wrapper_CompositeImplicitAutograd_scalarrayarray_gradient(self, spacing, dim, edge_order);
}
::std::vector<at::Tensor> gradient(const at::Tensor & self, at::TensorList spacing, c10::optional<int64_t> dim, int64_t edge_order) {
    return wrapper_CompositeImplicitAutograd_tensorarrayint_gradient(self, spacing, dim, edge_order);
}
::std::vector<at::Tensor> gradient(const at::Tensor & self, at::TensorList spacing, at::IntArrayRef dim, int64_t edge_order) {
    return wrapper_CompositeImplicitAutograd_tensorarray_gradient(self, spacing, dim, edge_order);
}
// divide family: Tensor/Scalar `other`, plus variants taking an optional
// rounding_mode; functional, in-place (_), and out-variant forms all forward
// to their generated wrappers.
at::Tensor divide(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor_divide(self, other);
}
// *_out takes `out` first, *_outf takes `out` last; both call the same wrapper.
at::Tensor & divide_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_out_divide_out(self, other, out);
}
at::Tensor & divide_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_divide_out(self, other, out);
}
at::Tensor & divide_(at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor_divide_(self, other);
}
at::Tensor divide(const at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_divide(self, other);
}
at::Tensor & divide_(at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_divide_(self, other);
}
// rounding_mode variants ("Tensor_mode" / "Scalar_mode" schema overloads).
at::Tensor divide(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
    return wrapper_CompositeImplicitAutograd_Tensor_mode_divide(self, other, rounding_mode);
}
at::Tensor & divide_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
    return wrapper_CompositeImplicitAutograd_out_mode_divide_out(self, other, rounding_mode, out);
}
at::Tensor & divide_outf(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_mode_divide_out(self, other, rounding_mode, out);
}
at::Tensor & divide_(at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
    return wrapper_CompositeImplicitAutograd_Tensor_mode_divide_(self, other, rounding_mode);
}
at::Tensor divide(const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
    return wrapper_CompositeImplicitAutograd_Scalar_mode_divide(self, other, rounding_mode);
}
at::Tensor & divide_(at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
    return wrapper_CompositeImplicitAutograd_Scalar_mode_divide_(self, other, rounding_mode);
}
// true_divide family: same forwarding shape as divide, no rounding_mode.
at::Tensor true_divide(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor_true_divide(self, other);
}
at::Tensor & true_divide_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_out_true_divide_out(self, other, out);
}
at::Tensor & true_divide_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_true_divide_out(self, other, out);
}
at::Tensor & true_divide_(at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor_true_divide_(self, other);
}
at::Tensor true_divide(const at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_true_divide(self, other);
}
at::Tensor & true_divide_(at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_true_divide_(self, other);
}
// einsum: `path` is an optional contraction order hint.
at::Tensor einsum(c10::string_view equation, at::TensorList tensors, at::OptionalIntArrayRef path) {
    return wrapper_CompositeImplicitAutograd__einsum(equation, tensors, path);
}
// embedding_backward: int64_t overload and a _symint overload taking
// c10::SymInt sizes; both forward to the same wrapper.
at::Tensor embedding_backward(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) {
    return wrapper_CompositeImplicitAutograd__embedding_backward(grad, indices, num_weights, padding_idx, scale_grad_by_freq, sparse);
}
at::Tensor embedding_backward_symint(const at::Tensor & grad, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) {
    return wrapper_CompositeImplicitAutograd__embedding_backward(grad, indices, num_weights, padding_idx, scale_grad_by_freq, sparse);
}
at::Tensor embedding_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
    return wrapper_CompositeImplicitAutograd__embedding_sparse_backward(grad, indices, num_weights, padding_idx, scale_grad_by_freq);
}
::std::tuple<at::Tensor,at::Tensor> _rowwise_prune(const at::Tensor & weight, const at::Tensor & mask, at::ScalarType compressed_indices_dtype) {
    return wrapper_CompositeImplicitAutograd___rowwise_prune(weight, mask, compressed_indices_dtype);
}
at::Tensor row_stack(at::TensorList tensors) {
    return wrapper_CompositeImplicitAutograd__row_stack(tensors);
}
// *_out takes `out` first, *_outf takes `out` last; both call the same wrapper.
at::Tensor & row_stack_out(at::Tensor & out, at::TensorList tensors) {
    return wrapper_CompositeImplicitAutograd_out_row_stack_out(tensors, out);
}
at::Tensor & row_stack_outf(at::TensorList tensors, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_row_stack_out(tensors, out);
}
// embedding_bag: two schema overloads — the second additionally takes an
// optional padding_idx and forwards to the "padding_idx" wrapper.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> embedding_bag(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset) {
    return wrapper_CompositeImplicitAutograd__embedding_bag(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> embedding_bag(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, c10::optional<int64_t> padding_idx) {
    return wrapper_CompositeImplicitAutograd_padding_idx_embedding_bag(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
}
// Backward entry points: int64_t and _symint (c10::SymInt num_weights)
// overloads share one wrapper each.
at::Tensor _embedding_bag_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
    return wrapper_CompositeImplicitAutograd___embedding_bag_backward(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx);
}
at::Tensor _embedding_bag_backward_symint(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
    return wrapper_CompositeImplicitAutograd___embedding_bag_backward(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx);
}
at::Tensor _embedding_bag_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
    return wrapper_CompositeImplicitAutograd___embedding_bag_sparse_backward(grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
}
at::Tensor _embedding_bag_sparse_backward_symint(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
    return wrapper_CompositeImplicitAutograd___embedding_bag_sparse_backward(grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
}
// empty_out family: the IntArrayRef overloads convert concrete sizes via
// c10::fromIntArrayRefSlow before calling the shared wrapper; the _symint
// overloads pass c10::SymIntArrayRef through directly.
at::Tensor & empty_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
    return wrapper_CompositeImplicitAutograd_out_empty_out(c10::fromIntArrayRefSlow(size), memory_format, out);
}
at::Tensor & empty_outf(at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_empty_out(c10::fromIntArrayRefSlow(size), memory_format, out);
}
at::Tensor & empty_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
    return wrapper_CompositeImplicitAutograd_out_empty_out(size, memory_format, out);
}
at::Tensor & empty_symint_outf(c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_empty_out(size, memory_format, out);
}
at::Tensor expand_as(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd__expand_as(self, other);
}
// flatten: four schema overloads — integer dims, integer dims with a named
// output dim, named dims, and a DimnameList of dims to collapse.
at::Tensor flatten(const at::Tensor & self, int64_t start_dim, int64_t end_dim) {
    return wrapper_CompositeImplicitAutograd_using_ints_flatten(self, start_dim, end_dim);
}
at::Tensor flatten(const at::Tensor & self, int64_t start_dim, int64_t end_dim, at::Dimname out_dim) {
    return wrapper_CompositeImplicitAutograd_named_out_dim_flatten(self, start_dim, end_dim, out_dim);
}
at::Tensor flatten(const at::Tensor & self, at::Dimname start_dim, at::Dimname end_dim, at::Dimname out_dim) {
    return wrapper_CompositeImplicitAutograd_using_names_flatten(self, start_dim, end_dim, out_dim);
}
at::Tensor flatten(const at::Tensor & self, at::DimnameList dims, at::Dimname out_dim) {
    return wrapper_CompositeImplicitAutograd_DimnameList_flatten(self, dims, out_dim);
}
// unflatten: int-dim overload and named-dim overload (the latter also names
// the resulting dims).
at::Tensor unflatten(const at::Tensor & self, int64_t dim, at::IntArrayRef sizes) {
    return wrapper_CompositeImplicitAutograd_int_unflatten(self, dim, sizes);
}
at::Tensor unflatten(const at::Tensor & self, at::Dimname dim, at::IntArrayRef sizes, at::DimnameList names) {
    return wrapper_CompositeImplicitAutograd_Dimname_unflatten(self, dim, sizes, names);
}
// floor_divide (Scalar overload), functional and in-place.
at::Tensor floor_divide(const at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_floor_divide(self, other);
}
at::Tensor & floor_divide_(at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_floor_divide_(self, other);
}
at::Tensor grid_sampler(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
    return wrapper_CompositeImplicitAutograd__grid_sampler(input, grid, interpolation_mode, padding_mode, align_corners);
}
::std::tuple<at::Tensor,at::Tensor> _grid_sampler_2d_cpu_fallback_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
    return wrapper_CompositeImplicitAutograd___grid_sampler_2d_cpu_fallback_backward(grad_output, input, grid, interpolation_mode, padding_mode, align_corners);
}
at::Tensor hinge_embedding_loss(const at::Tensor & self, const at::Tensor & target, double margin, int64_t reduction) {
    return wrapper_CompositeImplicitAutograd__hinge_embedding_loss(self, target, margin, reduction);
}
at::Tensor group_norm(const at::Tensor & input, int64_t num_groups, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, bool cudnn_enabled) {
    return wrapper_CompositeImplicitAutograd__group_norm(input, num_groups, weight, bias, eps, cudnn_enabled);
}
// cuFFT plan-cache accessors; the setters return void.
int64_t _cufft_get_plan_cache_size(int64_t device_index) {
    return wrapper_CompositeImplicitAutograd___cufft_get_plan_cache_size(device_index);
}
int64_t _cufft_get_plan_cache_max_size(int64_t device_index) {
    return wrapper_CompositeImplicitAutograd___cufft_get_plan_cache_max_size(device_index);
}
void _cufft_set_plan_cache_max_size(int64_t device_index, int64_t max_size) {
    return wrapper_CompositeImplicitAutograd___cufft_set_plan_cache_max_size(device_index, max_size);
}
void _cufft_clear_plan_cache(int64_t device_index) {
    return wrapper_CompositeImplicitAutograd___cufft_clear_plan_cache(device_index);
}
// index_copy (Dimname overload): in-place form returns the mutated self.
at::Tensor & index_copy_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) {
    return wrapper_CompositeImplicitAutograd_dimname_index_copy_(self, dim, index, source);
}
at::Tensor index_copy(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) {
    return wrapper_CompositeImplicitAutograd_dimname_index_copy(self, dim, index, source);
}
at::Tensor instance_norm(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) {
    return wrapper_CompositeImplicitAutograd__instance_norm(input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled);
}
at::Tensor isclose(const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan) {
    return wrapper_CompositeImplicitAutograd__isclose(self, other, rtol, atol, equal_nan);
}
// Tensor property predicates: each returns a host bool (isreal returns a
// Tensor mask) by forwarding to its generated wrapper.
bool is_distributed(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__is_distributed(self);
}
bool is_floating_point(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__is_floating_point(self);
}
bool is_complex(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__is_complex(self);
}
bool is_conj(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__is_conj(self);
}
bool _is_zerotensor(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd___is_zerotensor(self);
}
bool is_neg(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__is_neg(self);
}
// Unlike the predicates above, isreal returns an element-wise Tensor.
at::Tensor isreal(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__isreal(self);
}
bool is_nonzero(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__is_nonzero(self);
}
bool is_signed(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__is_signed(self);
}
bool is_inference(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__is_inference(self);
}
at::Tensor kl_div(const at::Tensor & self, const at::Tensor & target, int64_t reduction, bool log_target) {
    return wrapper_CompositeImplicitAutograd__kl_div(self, target, reduction, log_target);
}
at::Tensor kron(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd__kron(self, other);
}
// *_out takes `out` first, *_outf takes `out` last; both call the same wrapper.
at::Tensor & kron_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_out_kron_out(self, other, out);
}
at::Tensor & kron_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_kron_out(self, other, out);
}
// kthvalue (Dimname overload): returns (values, indices).
::std::tuple<at::Tensor,at::Tensor> kthvalue(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_dimname_kthvalue(self, k, dim, keepdim);
}
::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_dimname_out_kthvalue_out(self, k, dim, keepdim, values, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> kthvalue_outf(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
    return wrapper_CompositeImplicitAutograd_dimname_out_kthvalue_out(self, k, dim, keepdim, values, indices);
}
// layer_norm: concrete shape is converted via c10::fromIntArrayRefSlow; the
// _symint overload passes c10::SymIntArrayRef through to the same wrapper.
at::Tensor layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, bool cudnn_enable) {
    return wrapper_CompositeImplicitAutograd__layer_norm(input, c10::fromIntArrayRefSlow(normalized_shape), weight, bias, eps, cudnn_enable);
}
at::Tensor layer_norm_symint(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, bool cudnn_enable) {
    return wrapper_CompositeImplicitAutograd__layer_norm(input, normalized_shape, weight, bias, eps, cudnn_enable);
}
at::Tensor linear(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias) {
    return wrapper_CompositeImplicitAutograd__linear(input, weight, bias);
}
// FBGEMM quantized/fp16 linear entry points; each forwards to its wrapper.
at::Tensor fbgemm_linear_int8_weight_fp32_activation(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) {
    return wrapper_CompositeImplicitAutograd__fbgemm_linear_int8_weight_fp32_activation(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
}
at::Tensor fbgemm_linear_int8_weight(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) {
    return wrapper_CompositeImplicitAutograd__fbgemm_linear_int8_weight(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
}
::std::tuple<at::Tensor,at::Tensor,double,int64_t> fbgemm_linear_quantize_weight(const at::Tensor & input) {
    return wrapper_CompositeImplicitAutograd__fbgemm_linear_quantize_weight(input);
}
at::Tensor fbgemm_pack_gemm_matrix_fp16(const at::Tensor & input) {
    return wrapper_CompositeImplicitAutograd__fbgemm_pack_gemm_matrix_fp16(input);
}
at::Tensor fbgemm_linear_fp16_weight_fp32_activation(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) {
    return wrapper_CompositeImplicitAutograd__fbgemm_linear_fp16_weight_fp32_activation(input, packed_weight, bias);
}
at::Tensor fbgemm_linear_fp16_weight(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) {
    return wrapper_CompositeImplicitAutograd__fbgemm_linear_fp16_weight(input, packed_weight, bias);
}
// pack_quantized_matrix: plain overload and an explicit (K, N) overload.
at::Tensor fbgemm_pack_quantized_matrix(const at::Tensor & input) {
    return wrapper_CompositeImplicitAutograd__fbgemm_pack_quantized_matrix(input);
}
at::Tensor fbgemm_pack_quantized_matrix(const at::Tensor & input, int64_t K, int64_t N) {
    return wrapper_CompositeImplicitAutograd_KN_fbgemm_pack_quantized_matrix(input, K, N);
}
// ldexp family: functional, out-variant (two argument orders), and in-place.
at::Tensor ldexp(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor_ldexp(self, other);
}
at::Tensor & ldexp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_out_ldexp_out(self, other, out);
}
at::Tensor & ldexp_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_ldexp_out(self, other, out);
}
at::Tensor & ldexp_(at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd__ldexp_(self, other);
}
// log_softmax: integer-dim and named-dim overloads.
at::Tensor log_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_int_log_softmax(self, dim, dtype);
}
at::Tensor log_softmax(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_Dimname_log_softmax(self, dim, dtype);
}
// logcumsumexp (Dimname overload) plus its out-variants.
at::Tensor logcumsumexp(const at::Tensor & self, at::Dimname dim) {
    return wrapper_CompositeImplicitAutograd_dimname_logcumsumexp(self, dim);
}
at::Tensor & logcumsumexp_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim) {
    return wrapper_CompositeImplicitAutograd_dimname_out_logcumsumexp_out(self, dim, out);
}
at::Tensor & logcumsumexp_outf(const at::Tensor & self, at::Dimname dim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_dimname_out_logcumsumexp_out(self, dim, out);
}
// logsumexp (DimnameList overload) plus its out-variants.
at::Tensor logsumexp(const at::Tensor & self, at::DimnameList dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_names_logsumexp(self, dim, keepdim);
}
at::Tensor & logsumexp_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_names_out_logsumexp_out(self, dim, keepdim, out);
}
at::Tensor & logsumexp_outf(const at::Tensor & self, at::DimnameList dim, bool keepdim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_names_out_logsumexp_out(self, dim, keepdim, out);
}
at::Tensor margin_ranking_loss(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) {
    return wrapper_CompositeImplicitAutograd__margin_ranking_loss(input1, input2, target, margin, reduction);
}
at::Tensor matmul(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd__matmul(self, other);
}
// *_out takes `out` first, *_outf takes `out` last; both call the same wrapper.
at::Tensor & matmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_out_matmul_out(self, other, out);
}
at::Tensor & matmul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_matmul_out(self, other, out);
}
at::Tensor matrix_power(const at::Tensor & self, int64_t n) {
    return wrapper_CompositeImplicitAutograd__matrix_power(self, n);
}
at::Tensor & matrix_power_out(at::Tensor & out, const at::Tensor & self, int64_t n) {
    return wrapper_CompositeImplicitAutograd_out_matrix_power_out(self, n, out);
}
at::Tensor & matrix_power_outf(const at::Tensor & self, int64_t n, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_matrix_power_out(self, n, out);
}
at::Tensor matrix_exp(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__matrix_exp(self);
}
at::Tensor matrix_exp_backward(const at::Tensor & self, const at::Tensor & grad) {
    return wrapper_CompositeImplicitAutograd__matrix_exp_backward(self, grad);
}
// max (Dimname overload): returns (max, max_values); out-variants mirror the
// usual out-first / out-last argument orders.
::std::tuple<at::Tensor,at::Tensor> max(const at::Tensor & self, at::Dimname dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_names_dim_max(self, dim, keepdim);
}
::std::tuple<at::Tensor &,at::Tensor &> max_out(at::Tensor & max, at::Tensor & max_values, const at::Tensor & self, at::Dimname dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_names_dim_max_max_out(self, dim, keepdim, max, max_values);
}
::std::tuple<at::Tensor &,at::Tensor &> max_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) {
    return wrapper_CompositeImplicitAutograd_names_dim_max_max_out(self, dim, keepdim, max, max_values);
}
// value_selecting_reduction_backward: concrete sizes are converted via
// c10::fromIntArrayRefSlow; the _symint overload passes sizes through.
at::Tensor value_selecting_reduction_backward(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, at::IntArrayRef sizes, bool keepdim) {
    return wrapper_CompositeImplicitAutograd__value_selecting_reduction_backward(grad, dim, indices, c10::fromIntArrayRefSlow(sizes), keepdim);
}
at::Tensor value_selecting_reduction_backward_symint(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, c10::SymIntArrayRef sizes, bool keepdim) {
    return wrapper_CompositeImplicitAutograd__value_selecting_reduction_backward(grad, dim, indices, sizes, keepdim);
}
::std::tuple<at::Tensor,at::Tensor> max_pool1d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    return wrapper_CompositeImplicitAutograd__max_pool1d_with_indices(self, kernel_size, stride, padding, dilation, ceil_mode);
}
at::Tensor max_pool1d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    return wrapper_CompositeImplicitAutograd__max_pool1d(self, kernel_size, stride, padding, dilation, ceil_mode);
}
at::Tensor max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    return wrapper_CompositeImplicitAutograd__max_pool2d(self, kernel_size, stride, padding, dilation, ceil_mode);
}
at::Tensor max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    return wrapper_CompositeImplicitAutograd__max_pool3d(self, kernel_size, stride, padding, dilation, ceil_mode);
}
// mean (DimnameList overload) plus its out-variants.
at::Tensor mean(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_names_dim_mean(self, dim, keepdim, dtype);
}
at::Tensor & mean_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_names_out_mean_out(self, dim, keepdim, dtype, out);
}
at::Tensor & mean_outf(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_names_out_mean_out(self, dim, keepdim, dtype, out);
}
// nanmean takes an optional dim list rather than named dims.
at::Tensor nanmean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd__nanmean(self, dim, keepdim, dtype);
}
at::Tensor & nanmean_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_out_nanmean_out(self, dim, keepdim, dtype, out);
}
at::Tensor & nanmean_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_nanmean_out(self, dim, keepdim, dtype, out);
}
// median / nanmedian / min (Dimname overloads): each returns a
// (values, indices) pair; out-variants follow the out-first / out-last
// convention and share one wrapper per family.
::std::tuple<at::Tensor,at::Tensor> median(const at::Tensor & self, at::Dimname dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_names_dim_median(self, dim, keepdim);
}
::std::tuple<at::Tensor &,at::Tensor &> median_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_names_dim_values_median_out(self, dim, keepdim, values, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> median_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
    return wrapper_CompositeImplicitAutograd_names_dim_values_median_out(self, dim, keepdim, values, indices);
}
::std::tuple<at::Tensor,at::Tensor> nanmedian(const at::Tensor & self, at::Dimname dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_names_dim_nanmedian(self, dim, keepdim);
}
::std::tuple<at::Tensor &,at::Tensor &> nanmedian_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_names_dim_values_nanmedian_out(self, dim, keepdim, values, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> nanmedian_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
    return wrapper_CompositeImplicitAutograd_names_dim_values_nanmedian_out(self, dim, keepdim, values, indices);
}
::std::tuple<at::Tensor,at::Tensor> min(const at::Tensor & self, at::Dimname dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_names_dim_min(self, dim, keepdim);
}
::std::tuple<at::Tensor &,at::Tensor &> min_out(at::Tensor & min, at::Tensor & min_indices, const at::Tensor & self, at::Dimname dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_names_dim_min_min_out(self, dim, keepdim, min, min_indices);
}
9838 | ::std::tuple<at::Tensor &,at::Tensor &> min_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices) { |
9839 | return wrapper_CompositeImplicitAutograd_names_dim_min_min_out(self, dim, keepdim, min, min_indices); |
9840 | } |
// Generated forwarding stubs (torchgen): each at::* entry point delegates to
// the file-local wrapper_CompositeImplicitAutograd_* overload of the same op.
at::Tensor _sparse_mm(const at::Tensor & sparse, const at::Tensor & dense) {
    return wrapper_CompositeImplicitAutograd___sparse_mm(sparse, dense);
}
// Overload taking a `reduce` mode string (e.g. the .reduce variant in the op schema).
at::Tensor _sparse_mm(const at::Tensor & sparse, const at::Tensor & dense, c10::string_view reduce) {
    return wrapper_CompositeImplicitAutograd_reduce__sparse_mm(sparse, dense, reduce);
}
::std::tuple<at::Tensor,at::Tensor> mode(const at::Tensor & self, at::Dimname dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_dimname_mode(self, dim, keepdim);
}
// _out (out-first) and _outf (out-last) forward to the same wrapper.
::std::tuple<at::Tensor &,at::Tensor &> mode_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_dimname_out_mode_out(self, dim, keepdim, values, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> mode_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
    return wrapper_CompositeImplicitAutograd_dimname_out_mode_out(self, dim, keepdim, values, indices);
}
// multiply: Tensor and Scalar `other` overloads, each with an in-place (`_`) form.
at::Tensor multiply(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor_multiply(self, other);
}
at::Tensor & multiply_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_out_multiply_out(self, other, out);
}
at::Tensor & multiply_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_multiply_out(self, other, out);
}
at::Tensor & multiply_(at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor_multiply_(self, other);
}
at::Tensor multiply(const at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_multiply(self, other);
}
at::Tensor & multiply_(at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_multiply_(self, other);
}
// narrow / narrow_symint pairs forward to one wrapper; the int64_t forms rely
// on conversion of start/length to c10::SymInt at the call site.
at::Tensor narrow(const at::Tensor & self, int64_t dim, int64_t start, int64_t length) {
    return wrapper_CompositeImplicitAutograd__narrow(self, dim, start, length);
}
at::Tensor narrow_symint(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
    return wrapper_CompositeImplicitAutograd__narrow(self, dim, start, length);
}
// Tensor-valued `start` overload (narrow.Tensor in the op schema).
at::Tensor narrow(const at::Tensor & self, int64_t dim, const at::Tensor & start, int64_t length) {
    return wrapper_CompositeImplicitAutograd_Tensor_narrow(self, dim, start, length);
}
at::Tensor narrow_symint(const at::Tensor & self, int64_t dim, const at::Tensor & start, c10::SymInt length) {
    return wrapper_CompositeImplicitAutograd_Tensor_narrow(self, dim, start, length);
}
// Generated forwarding stubs (torchgen): each at::* entry point delegates to
// the file-local wrapper_CompositeImplicitAutograd_* overload of the same op.
// Backend availability probes (no tensor arguments).
bool is_vulkan_available() {
    return wrapper_CompositeImplicitAutograd__is_vulkan_available();
}
bool _nnpack_available() {
    return wrapper_CompositeImplicitAutograd___nnpack_available();
}
// Distance / similarity ops.
at::Tensor pairwise_distance(const at::Tensor & x1, const at::Tensor & x2, double p, double eps, bool keepdim) {
    return wrapper_CompositeImplicitAutograd__pairwise_distance(x1, x2, p, eps, keepdim);
}
at::Tensor cdist(const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode) {
    return wrapper_CompositeImplicitAutograd__cdist(x1, x2, p, compute_mode);
}
at::Tensor pdist(const at::Tensor & self, double p) {
    return wrapper_CompositeImplicitAutograd__pdist(self, p);
}
at::Tensor cosine_similarity(const at::Tensor & x1, const at::Tensor & x2, int64_t dim, double eps) {
    return wrapper_CompositeImplicitAutograd__cosine_similarity(x1, x2, dim, eps);
}
// movedim/moveaxis each have an IntArrayRef ("intlist") and a single-int overload.
at::Tensor movedim(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) {
    return wrapper_CompositeImplicitAutograd_intlist_movedim(self, source, destination);
}
at::Tensor movedim(const at::Tensor & self, int64_t source, int64_t destination) {
    return wrapper_CompositeImplicitAutograd_int_movedim(self, source, destination);
}
at::Tensor moveaxis(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) {
    return wrapper_CompositeImplicitAutograd_intlist_moveaxis(self, source, destination);
}
at::Tensor moveaxis(const at::Tensor & self, int64_t source, int64_t destination) {
    return wrapper_CompositeImplicitAutograd_int_moveaxis(self, source, destination);
}
// Matrix-view style accessors (numpy_T / matrix_H / mT / mH / adjoint).
at::Tensor numpy_T(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__numpy_T(self);
}
at::Tensor matrix_H(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__matrix_H(self);
}
at::Tensor mT(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__mT(self);
}
at::Tensor mH(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__mH(self);
}
at::Tensor adjoint(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__adjoint(self);
}
at::Tensor native_channel_shuffle(const at::Tensor & self, int64_t groups) {
    return wrapper_CompositeImplicitAutograd__native_channel_shuffle(self, groups);
}
at::Tensor pin_memory(const at::Tensor & self, c10::optional<at::Device> device) {
    return wrapper_CompositeImplicitAutograd__pin_memory(self, device);
}
at::Tensor pinverse(const at::Tensor & self, double rcond) {
    return wrapper_CompositeImplicitAutograd__pinverse(self, rcond);
}
at::Tensor poisson_nll_loss(const at::Tensor & input, const at::Tensor & target, bool log_input, bool full, double eps, int64_t reduction) {
    return wrapper_CompositeImplicitAutograd__poisson_nll_loss(input, target, log_input, full, eps, reduction);
}
// Generated forwarding stubs (torchgen) for the rand/randn out variants.
// Each wrapper takes SymInt sizes: the plain IntArrayRef entry points convert
// via c10::fromIntArrayRefSlow before forwarding, while the *_symint_* entry
// points pass the SymIntArrayRef through unchanged. _out/_outf differ only in
// argument order and share a wrapper.
at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::Generator> generator) {
    return wrapper_CompositeImplicitAutograd_generator_out_rand_out(c10::fromIntArrayRefSlow(size), generator, out);
}
at::Tensor & rand_outf(at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_generator_out_rand_out(c10::fromIntArrayRefSlow(size), generator, out);
}
at::Tensor & rand_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::Generator> generator) {
    return wrapper_CompositeImplicitAutograd_generator_out_rand_out(size, generator, out);
}
at::Tensor & rand_symint_outf(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_generator_out_rand_out(size, generator, out);
}
// randn without an explicit generator.
at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size) {
    return wrapper_CompositeImplicitAutograd_out_randn_out(c10::fromIntArrayRefSlow(size), out);
}
at::Tensor & randn_outf(at::IntArrayRef size, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_randn_out(c10::fromIntArrayRefSlow(size), out);
}
at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size) {
    return wrapper_CompositeImplicitAutograd_out_randn_out(size, out);
}
at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_randn_out(size, out);
}
// randn with an explicit optional generator.
at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::Generator> generator) {
    return wrapper_CompositeImplicitAutograd_generator_out_randn_out(c10::fromIntArrayRefSlow(size), generator, out);
}
at::Tensor & randn_outf(at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_generator_out_randn_out(c10::fromIntArrayRefSlow(size), generator, out);
}
at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::Generator> generator) {
    return wrapper_CompositeImplicitAutograd_generator_out_randn_out(size, generator, out);
}
at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_generator_out_randn_out(size, generator, out);
}
// Generated forwarding stubs (torchgen): each at::* entry point delegates to
// the file-local wrapper_CompositeImplicitAutograd_* overload of the same op.
at::Tensor ravel(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__ravel(self);
}
at::Tensor negative(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__negative(self);
}
// _out (out-first) and _outf (out-last) forward to the same wrapper.
at::Tensor & negative_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd_out_negative_out(self, out);
}
at::Tensor & negative_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_negative_out(self, out);
}
at::Tensor & negative_(at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__negative_(self);
}
// repeat_interleave: Tensor-valued repeats vs. scalar repeats; the int64_t and
// symint scalar forms share a wrapper (int64_t converts to c10::SymInt).
at::Tensor repeat_interleave(const at::Tensor & self, const at::Tensor & repeats, c10::optional<int64_t> dim, c10::optional<int64_t> output_size) {
    return wrapper_CompositeImplicitAutograd_self_Tensor_repeat_interleave(self, repeats, dim, output_size);
}
at::Tensor repeat_interleave(const at::Tensor & self, int64_t repeats, c10::optional<int64_t> dim, c10::optional<int64_t> output_size) {
    return wrapper_CompositeImplicitAutograd_self_int_repeat_interleave(self, repeats, dim, output_size);
}
at::Tensor repeat_interleave_symint(const at::Tensor & self, c10::SymInt repeats, c10::optional<int64_t> dim, c10::optional<int64_t> output_size) {
    return wrapper_CompositeImplicitAutograd_self_int_repeat_interleave(self, repeats, dim, output_size);
}
// reshape: the IntArrayRef form converts shape to SymInt via fromIntArrayRefSlow.
at::Tensor reshape(const at::Tensor & self, at::IntArrayRef shape) {
    return wrapper_CompositeImplicitAutograd__reshape(self, c10::fromIntArrayRefSlow(shape));
}
at::Tensor reshape_symint(const at::Tensor & self, c10::SymIntArrayRef shape) {
    return wrapper_CompositeImplicitAutograd__reshape(self, shape);
}
at::Tensor reshape_as(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd__reshape_as(self, other);
}
at::Tensor rrelu(const at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {
    return wrapper_CompositeImplicitAutograd__rrelu(self, lower, upper, training, generator);
}
at::Tensor & rrelu_(at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {
    return wrapper_CompositeImplicitAutograd__rrelu_(self, lower, upper, training, generator);
}
at::Tensor relu6(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__relu6(self);
}
at::Tensor & relu6_(at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__relu6_(self);
}
// Generated forwarding stubs (torchgen): each at::* entry point delegates to
// the file-local wrapper_CompositeImplicitAutograd_* overload of the same op.
at::Tensor prelu(const at::Tensor & self, const at::Tensor & weight) {
    return wrapper_CompositeImplicitAutograd__prelu(self, weight);
}
at::Tensor infinitely_differentiable_gelu_backward(const at::Tensor & grad, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__infinitely_differentiable_gelu_backward(grad, self);
}
// Named-dimension (Dimname) select overload.
at::Tensor select(const at::Tensor & self, at::Dimname dim, int64_t index) {
    return wrapper_CompositeImplicitAutograd_Dimname_select(self, dim, index);
}
at::Tensor selu(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__selu(self);
}
at::Tensor & selu_(at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__selu_(self);
}
at::Tensor silu_backward(const at::Tensor & grad_output, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__silu_backward(grad_output, self);
}
at::Tensor mish_backward(const at::Tensor & grad_output, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__mish_backward(grad_output, self);
}
// size: int-dim and Dimname-dim overloads.
int64_t size(const at::Tensor & self, int64_t dim) {
    return wrapper_CompositeImplicitAutograd_int_size(self, dim);
}
int64_t size(const at::Tensor & self, at::Dimname dim) {
    return wrapper_CompositeImplicitAutograd_Dimname_size(self, dim);
}
at::Tensor smm(const at::Tensor & self, const at::Tensor & mat2) {
    return wrapper_CompositeImplicitAutograd__smm(self, mat2);
}
// softmax: int-dim and Dimname-dim overloads.
at::Tensor softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_int_softmax(self, dim, dtype);
}
at::Tensor softmax(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_Dimname_softmax(self, dim, dtype);
}
// Generated forwarding stubs (torchgen): each at::* entry point delegates to
// the file-local wrapper_CompositeImplicitAutograd_* overload of the same op.
// split: the IntArrayRef form converts sizes to SymInt via fromIntArrayRefSlow;
// split_symint passes the SymIntArrayRef through to the same wrapper.
::std::vector<at::Tensor> split(const at::Tensor & self, at::IntArrayRef split_size, int64_t dim) {
    return wrapper_CompositeImplicitAutograd_sizes_split(self, c10::fromIntArrayRefSlow(split_size), dim);
}
::std::vector<at::Tensor> split_symint(const at::Tensor & self, c10::SymIntArrayRef split_size, int64_t dim) {
    return wrapper_CompositeImplicitAutograd_sizes_split(self, split_size, dim);
}
// hsplit/vsplit/dsplit each have a section-count (int) and an indices (array) overload.
::std::vector<at::Tensor> hsplit(const at::Tensor & self, int64_t sections) {
    return wrapper_CompositeImplicitAutograd_int_hsplit(self, sections);
}
::std::vector<at::Tensor> hsplit(const at::Tensor & self, at::IntArrayRef indices) {
    return wrapper_CompositeImplicitAutograd_array_hsplit(self, indices);
}
::std::vector<at::Tensor> vsplit(const at::Tensor & self, int64_t sections) {
    return wrapper_CompositeImplicitAutograd_int_vsplit(self, sections);
}
::std::vector<at::Tensor> vsplit(const at::Tensor & self, at::IntArrayRef indices) {
    return wrapper_CompositeImplicitAutograd_array_vsplit(self, indices);
}
::std::vector<at::Tensor> dsplit(const at::Tensor & self, int64_t sections) {
    return wrapper_CompositeImplicitAutograd_int_dsplit(self, sections);
}
::std::vector<at::Tensor> dsplit(const at::Tensor & self, at::IntArrayRef indices) {
    return wrapper_CompositeImplicitAutograd_array_dsplit(self, indices);
}
// Named-dimension squeeze and its in-place form.
at::Tensor squeeze(const at::Tensor & self, at::Dimname dim) {
    return wrapper_CompositeImplicitAutograd_dimname_squeeze(self, dim);
}
at::Tensor & squeeze_(at::Tensor & self, at::Dimname dim) {
    return wrapper_CompositeImplicitAutograd_dimname_squeeze_(self, dim);
}
at::Tensor sspaddmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
    return wrapper_CompositeImplicitAutograd__sspaddmm(self, mat1, mat2, beta, alpha);
}
// Generated forwarding stubs (torchgen): each at::* entry point delegates to
// the file-local wrapper_CompositeImplicitAutograd_* overload of the same op.
// hstack/vstack/dstack: functional form plus _out/_outf pairs sharing a wrapper.
at::Tensor hstack(at::TensorList tensors) {
    return wrapper_CompositeImplicitAutograd__hstack(tensors);
}
at::Tensor & hstack_out(at::Tensor & out, at::TensorList tensors) {
    return wrapper_CompositeImplicitAutograd_out_hstack_out(tensors, out);
}
at::Tensor & hstack_outf(at::TensorList tensors, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_hstack_out(tensors, out);
}
at::Tensor vstack(at::TensorList tensors) {
    return wrapper_CompositeImplicitAutograd__vstack(tensors);
}
at::Tensor & vstack_out(at::Tensor & out, at::TensorList tensors) {
    return wrapper_CompositeImplicitAutograd_out_vstack_out(tensors, out);
}
at::Tensor & vstack_outf(at::TensorList tensors, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_vstack_out(tensors, out);
}
at::Tensor dstack(at::TensorList tensors) {
    return wrapper_CompositeImplicitAutograd__dstack(tensors);
}
at::Tensor & dstack_out(at::Tensor & out, at::TensorList tensors) {
    return wrapper_CompositeImplicitAutograd_out_dstack_out(tensors, out);
}
at::Tensor & dstack_outf(at::TensorList tensors, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_dstack_out(tensors, out);
}
// stft: legacy overload without center/pad_mode, and the full "center" overload.
at::Tensor stft(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool normalized, c10::optional<bool> onesided, c10::optional<bool> return_complex) {
    return wrapper_CompositeImplicitAutograd__stft(self, n_fft, hop_length, win_length, window, normalized, onesided, return_complex);
}
at::Tensor stft(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool center, c10::string_view pad_mode, bool normalized, c10::optional<bool> onesided, c10::optional<bool> return_complex) {
    return wrapper_CompositeImplicitAutograd_center_stft(self, n_fft, hop_length, win_length, window, center, pad_mode, normalized, onesided, return_complex);
}
at::Tensor istft(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool center, bool normalized, c10::optional<bool> onesided, c10::optional<int64_t> length, bool return_complex) {
    return wrapper_CompositeImplicitAutograd__istft(self, n_fft, hop_length, win_length, window, center, normalized, onesided, length, return_complex);
}
// stride: int-dim and Dimname-dim overloads.
int64_t stride(const at::Tensor & self, int64_t dim) {
    return wrapper_CompositeImplicitAutograd_int_stride(self, dim);
}
int64_t stride(const at::Tensor & self, at::Dimname dim) {
    return wrapper_CompositeImplicitAutograd_Dimname_stride(self, dim);
}
// Generated forwarding stubs (torchgen): each at::* entry point delegates to
// the file-local wrapper_CompositeImplicitAutograd_* overload of the same op.
// _out (out-first) and _outf (out-last) pairs share a wrapper throughout.
at::Tensor sum(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_dim_DimnameList_sum(self, dim, keepdim, dtype);
}
at::Tensor & sum_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
    return wrapper_CompositeImplicitAutograd_DimnameList_out_sum_out(self, dim, keepdim, dtype, out);
}
at::Tensor & sum_outf(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_DimnameList_out_sum_out(self, dim, keepdim, dtype, out);
}
at::Tensor sum_to_size(const at::Tensor & self, at::IntArrayRef size) {
    return wrapper_CompositeImplicitAutograd__sum_to_size(self, size);
}
at::Tensor square(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__square(self);
}
at::Tensor & square_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd_out_square_out(self, out);
}
at::Tensor & square_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_square_out(self, out);
}
at::Tensor & square_(at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__square_(self);
}
// std family: legacy `unbiased` overloads (whole-tensor, optional-dim,
// named-dim) and the named-dim `correction` overloads.
at::Tensor std(const at::Tensor & self, bool unbiased) {
    return wrapper_CompositeImplicitAutograd__std(self, unbiased);
}
at::Tensor std(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_dim_std(self, dim, unbiased, keepdim);
}
at::Tensor & std_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_out_std_out(self, dim, unbiased, keepdim, out);
}
at::Tensor & std_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_std_out(self, dim, unbiased, keepdim, out);
}
::std::tuple<at::Tensor,at::Tensor> std_mean(const at::Tensor & self, bool unbiased) {
    return wrapper_CompositeImplicitAutograd__std_mean(self, unbiased);
}
::std::tuple<at::Tensor,at::Tensor> std_mean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_dim_std_mean(self, dim, unbiased, keepdim);
}
::std::tuple<at::Tensor,at::Tensor> std_mean(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_names_dim_std_mean(self, dim, unbiased, keepdim);
}
::std::tuple<at::Tensor,at::Tensor> std_mean(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_correction_names_std_mean(self, dim, correction, keepdim);
}
at::Tensor std(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_names_dim_std(self, dim, unbiased, keepdim);
}
at::Tensor & std_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_names_out_std_out(self, dim, unbiased, keepdim, out);
}
at::Tensor & std_outf(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_names_out_std_out(self, dim, unbiased, keepdim, out);
}
at::Tensor std(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_correction_names_std(self, dim, correction, keepdim);
}
at::Tensor & std_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
    return wrapper_CompositeImplicitAutograd_correction_names_out_std_out(self, dim, correction, keepdim, out);
}
at::Tensor & std_outf(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_correction_names_out_std_out(self, dim, correction, keepdim, out);
}
10200 | } |
10201 | at::Tensor prod(const at::Tensor & self, at::Dimname dim, bool keepdim, c10::optional<at::ScalarType> dtype) { |
10202 | return wrapper_CompositeImplicitAutograd_dim_Dimname_prod(self, dim, keepdim, dtype); |
10203 | } |
10204 | at::Tensor & prod_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, bool keepdim, c10::optional<at::ScalarType> dtype) { |
10205 | return wrapper_CompositeImplicitAutograd_Dimname_out_prod_out(self, dim, keepdim, dtype, out); |
10206 | } |
10207 | at::Tensor & prod_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) { |
10208 | return wrapper_CompositeImplicitAutograd_Dimname_out_prod_out(self, dim, keepdim, dtype, out); |
10209 | } |
10210 | at::Tensor tensordot(const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other) { |
10211 | return wrapper_CompositeImplicitAutograd__tensordot(self, other, dims_self, dims_other); |
10212 | } |
10213 | at::Tensor tile(const at::Tensor & self, at::IntArrayRef dims) { |
10214 | return wrapper_CompositeImplicitAutograd__tile(self, dims); |
10215 | } |
10216 | at::Tensor transpose(const at::Tensor & self, at::Dimname dim0, at::Dimname dim1) { |
10217 | return wrapper_CompositeImplicitAutograd_Dimname_transpose(self, dim0, dim1); |
10218 | } |
10219 | at::Tensor one_hot(const at::Tensor & self, int64_t num_classes) { |
10220 | return wrapper_CompositeImplicitAutograd__one_hot(self, num_classes); |
10221 | } |
10222 | at::Tensor fliplr(const at::Tensor & self) { |
10223 | return wrapper_CompositeImplicitAutograd__fliplr(self); |
10224 | } |
10225 | at::Tensor flipud(const at::Tensor & self) { |
10226 | return wrapper_CompositeImplicitAutograd__flipud(self); |
10227 | } |
10228 | at::Tensor trapezoid(const at::Tensor & y, const at::Tensor & x, int64_t dim) { |
10229 | return wrapper_CompositeImplicitAutograd_x_trapezoid(y, x, dim); |
10230 | } |
10231 | at::Tensor trapezoid(const at::Tensor & y, const at::Scalar & dx, int64_t dim) { |
10232 | return wrapper_CompositeImplicitAutograd_dx_trapezoid(y, dx, dim); |
10233 | } |
10234 | at::Tensor trapz(const at::Tensor & y, const at::Tensor & x, int64_t dim) { |
10235 | return wrapper_CompositeImplicitAutograd_x_trapz(y, x, dim); |
10236 | } |
10237 | at::Tensor trapz(const at::Tensor & y, double dx, int64_t dim) { |
10238 | return wrapper_CompositeImplicitAutograd_dx_trapz(y, dx, dim); |
10239 | } |
10240 | at::Tensor triplet_margin_loss(const at::Tensor & anchor, const at::Tensor & positive, const at::Tensor & negative, double margin, double p, double eps, bool swap, int64_t reduction) { |
10241 | return wrapper_CompositeImplicitAutograd__triplet_margin_loss(anchor, positive, negative, margin, p, eps, swap, reduction); |
10242 | } |
10243 | at::Tensor fix(const at::Tensor & self) { |
10244 | return wrapper_CompositeImplicitAutograd__fix(self); |
10245 | } |
10246 | at::Tensor & fix_out(at::Tensor & out, const at::Tensor & self) { |
10247 | return wrapper_CompositeImplicitAutograd_out_fix_out(self, out); |
10248 | } |
10249 | at::Tensor & fix_outf(const at::Tensor & self, at::Tensor & out) { |
10250 | return wrapper_CompositeImplicitAutograd_out_fix_out(self, out); |
10251 | } |
10252 | at::Tensor & fix_(at::Tensor & self) { |
10253 | return wrapper_CompositeImplicitAutograd__fix_(self); |
10254 | } |
10255 | at::Tensor type_as(const at::Tensor & self, const at::Tensor & other) { |
10256 | return wrapper_CompositeImplicitAutograd__type_as(self, other); |
10257 | } |
10258 | bool _has_compatible_shallow_copy_type(const at::Tensor & self, const at::Tensor & from) { |
10259 | return wrapper_CompositeImplicitAutograd___has_compatible_shallow_copy_type(self, from); |
10260 | } |
10261 | at::Tensor vander(const at::Tensor & x, c10::optional<int64_t> N, bool increasing) { |
10262 | return wrapper_CompositeImplicitAutograd__vander(x, N, increasing); |
10263 | } |
// var / var_mean overload family: each overload forwards unchanged to its
// generated CompositeImplicitAutograd wrapper. Overloads differ in how the
// reduction dims are given (none / OptionalIntArrayRef / DimnameList) and in
// the bias handling (bool `unbiased` vs optional integer `correction`).
// Convention visible below: `*_out` takes `out` as the FIRST argument,
// `*_outf` takes it LAST; both forward to the same wrapper.
at::Tensor var(const at::Tensor & self, bool unbiased) {
return wrapper_CompositeImplicitAutograd__var(self, unbiased);
}
at::Tensor var(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
return wrapper_CompositeImplicitAutograd_dim_var(self, dim, unbiased, keepdim);
}
at::Tensor & var_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
return wrapper_CompositeImplicitAutograd_out_var_out(self, dim, unbiased, keepdim, out);
}
at::Tensor & var_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_var_out(self, dim, unbiased, keepdim, out);
}
at::Tensor var(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
return wrapper_CompositeImplicitAutograd_names_dim_var(self, dim, unbiased, keepdim);
}
at::Tensor & var_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
return wrapper_CompositeImplicitAutograd_names_out_var_out(self, dim, unbiased, keepdim, out);
}
at::Tensor & var_outf(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_names_out_var_out(self, dim, unbiased, keepdim, out);
}
// `correction` variants: bias correction given as an optional integer instead
// of the legacy `unbiased` bool.
at::Tensor var(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
return wrapper_CompositeImplicitAutograd_correction_names_var(self, dim, correction, keepdim);
}
at::Tensor & var_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
return wrapper_CompositeImplicitAutograd_correction_names_out_var_out(self, dim, correction, keepdim, out);
}
at::Tensor & var_outf(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_correction_names_out_var_out(self, dim, correction, keepdim, out);
}
// var_mean: returns the (variance, mean) pair in one call.
::std::tuple<at::Tensor,at::Tensor> var_mean(const at::Tensor & self, bool unbiased) {
return wrapper_CompositeImplicitAutograd__var_mean(self, unbiased);
}
::std::tuple<at::Tensor,at::Tensor> var_mean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
return wrapper_CompositeImplicitAutograd_dim_var_mean(self, dim, unbiased, keepdim);
}
::std::tuple<at::Tensor,at::Tensor> var_mean(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
return wrapper_CompositeImplicitAutograd_names_dim_var_mean(self, dim, unbiased, keepdim);
}
::std::tuple<at::Tensor,at::Tensor> var_mean(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
return wrapper_CompositeImplicitAutograd_correction_names_var_mean(self, dim, correction, keepdim);
}
// view_as + where overloads: thin shims over the generated wrappers. The
// `where` overloads cover the Scalar/Tensor combinations of (self, other);
// the single-argument `where(condition)` returns a vector of index tensors.
at::Tensor view_as(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd__view_as(self, other);
}
at::Tensor where(const at::Tensor & condition, const at::Scalar & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_ScalarSelf_where(condition, self, other);
}
at::Tensor where(const at::Tensor & condition, const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeImplicitAutograd_ScalarOther_where(condition, self, other);
}
at::Tensor where(const at::Tensor & condition, const at::Scalar & self, const at::Scalar & other) {
return wrapper_CompositeImplicitAutograd_Scalar_where(condition, self, other);
}
::std::vector<at::Tensor> where(const at::Tensor & condition) {
return wrapper_CompositeImplicitAutograd__where(condition);
}
// Weight-norm helpers: straight pass-throughs to the generated wrappers.
at::Tensor norm_except_dim(const at::Tensor & v, int64_t pow, int64_t dim) {
return wrapper_CompositeImplicitAutograd__norm_except_dim(v, pow, dim);
}
at::Tensor _weight_norm(const at::Tensor & v, const at::Tensor & g, int64_t dim) {
return wrapper_CompositeImplicitAutograd___weight_norm(v, g, dim);
}
::std::tuple<at::Tensor,at::Tensor> _weight_norm_differentiable_backward(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
return wrapper_CompositeImplicitAutograd___weight_norm_differentiable_backward(grad_w, saved_v, saved_g, saved_norms, dim);
}
// Sparse reductions/softmax: overloads differ by dtype/dim/Dimname arguments;
// each forwards verbatim to the matching generated wrapper.
at::Tensor _sparse_sum(const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd___sparse_sum(self);
}
at::Tensor _sparse_sum(const at::Tensor & self, at::ScalarType dtype) {
return wrapper_CompositeImplicitAutograd_dtype__sparse_sum(self, dtype);
}
at::Tensor _sparse_sum(const at::Tensor & self, at::IntArrayRef dim, at::ScalarType dtype) {
return wrapper_CompositeImplicitAutograd_dim_dtype__sparse_sum(self, dim, dtype);
}
at::Tensor _sparse_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeImplicitAutograd_int__sparse_softmax(self, dim, dtype);
}
at::Tensor _sparse_softmax(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeImplicitAutograd_Dimname__sparse_softmax(self, dim, dtype);
}
at::Tensor _sparse_log_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeImplicitAutograd_int__sparse_log_softmax(self, dim, dtype);
}
at::Tensor _sparse_log_softmax(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeImplicitAutograd_Dimname__sparse_log_softmax(self, dim, dtype);
}
// norm (named-dim overloads), frobenius_norm, nuclear_norm: shims over the
// generated wrappers. Same `*_out` (out first) / `*_outf` (out last)
// convention as elsewhere in this file; each pair hits the same wrapper.
at::Tensor norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype) {
return wrapper_CompositeImplicitAutograd_names_ScalarOpt_dim_dtype_norm(self, p, dim, keepdim, dtype);
}
at::Tensor & norm_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype) {
return wrapper_CompositeImplicitAutograd_names_dtype_out_norm_out(self, p, dim, keepdim, dtype, out);
}
at::Tensor & norm_outf(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_names_dtype_out_norm_out(self, p, dim, keepdim, dtype, out);
}
at::Tensor norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim) {
return wrapper_CompositeImplicitAutograd_names_ScalarOpt_dim_norm(self, p, dim, keepdim);
}
at::Tensor & norm_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim) {
return wrapper_CompositeImplicitAutograd_names_out_norm_out(self, p, dim, keepdim, out);
}
at::Tensor & norm_outf(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_names_out_norm_out(self, p, dim, keepdim, out);
}
at::Tensor frobenius_norm(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
return wrapper_CompositeImplicitAutograd_dim_frobenius_norm(self, dim, keepdim);
}
at::Tensor & frobenius_norm_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
return wrapper_CompositeImplicitAutograd_out_frobenius_norm_out(self, dim, keepdim, out);
}
at::Tensor & frobenius_norm_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_frobenius_norm_out(self, dim, keepdim, out);
}
at::Tensor nuclear_norm(const at::Tensor & self, bool keepdim) {
return wrapper_CompositeImplicitAutograd__nuclear_norm(self, keepdim);
}
at::Tensor & nuclear_norm_out(at::Tensor & out, const at::Tensor & self, bool keepdim) {
return wrapper_CompositeImplicitAutograd_out_nuclear_norm_out(self, keepdim, out);
}
at::Tensor & nuclear_norm_outf(const at::Tensor & self, bool keepdim, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_nuclear_norm_out(self, keepdim, out);
}
at::Tensor nuclear_norm(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
return wrapper_CompositeImplicitAutograd_dim_nuclear_norm(self, dim, keepdim);
}
at::Tensor & nuclear_norm_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
return wrapper_CompositeImplicitAutograd_dim_out_nuclear_norm_out(self, dim, keepdim, out);
}
at::Tensor & nuclear_norm_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_dim_out_nuclear_norm_out(self, dim, keepdim, out);
}
// positive + subtract family: shims over the generated wrappers. `subtract_`
// variants mutate `self` in place and return it (by reference).
at::Tensor positive(const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd__positive(self);
}
at::Tensor subtract(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
return wrapper_CompositeImplicitAutograd_Tensor_subtract(self, other, alpha);
}
at::Tensor & subtract_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
return wrapper_CompositeImplicitAutograd_out_subtract_out(self, other, alpha, out);
}
at::Tensor & subtract_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_subtract_out(self, other, alpha, out);
}
at::Tensor & subtract_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
return wrapper_CompositeImplicitAutograd_Tensor_subtract_(self, other, alpha);
}
at::Tensor subtract(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
return wrapper_CompositeImplicitAutograd_Scalar_subtract(self, other, alpha);
}
at::Tensor & subtract_(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
return wrapper_CompositeImplicitAutograd_Scalar_subtract_(self, other, alpha);
}
// Compressed-sparse factories (explicit `size`). Each factory comes in two
// overloads: one taking at::TensorOptions, which is unpacked here into the
// four optional fields (dtype/layout/device/pin_memory) before forwarding,
// and one taking those four optionals directly. Both hit the same wrapper.
at::Tensor sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
return wrapper_CompositeImplicitAutograd_comp_plain_value_size_sparse_compressed_tensor(compressed_indices, plain_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeImplicitAutograd_comp_plain_value_size_sparse_compressed_tensor(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
}
at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
return wrapper_CompositeImplicitAutograd_crow_col_value_size_sparse_csr_tensor(crow_indices, col_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeImplicitAutograd_crow_col_value_size_sparse_csr_tensor(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
}
at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
return wrapper_CompositeImplicitAutograd_ccol_row_value_size_sparse_csc_tensor(ccol_indices, row_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeImplicitAutograd_ccol_row_value_size_sparse_csc_tensor(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
}
at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
return wrapper_CompositeImplicitAutograd_crow_col_value_size_sparse_bsr_tensor(crow_indices, col_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeImplicitAutograd_crow_col_value_size_sparse_bsr_tensor(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
}
at::Tensor sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
return wrapper_CompositeImplicitAutograd_ccol_row_value_size_sparse_bsc_tensor(ccol_indices, row_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeImplicitAutograd_ccol_row_value_size_sparse_bsc_tensor(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
}
// Compressed-sparse factories WITHOUT an explicit `size` argument; same
// TensorOptions-unpacking pattern as the `size` overloads above, forwarding
// to the `*_value_*` (size-less) wrappers.
at::Tensor sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::TensorOptions options) {
return wrapper_CompositeImplicitAutograd_comp_plain_value_sparse_compressed_tensor(compressed_indices, plain_indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeImplicitAutograd_comp_plain_value_sparse_compressed_tensor(compressed_indices, plain_indices, values, dtype, layout, device, pin_memory);
}
at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::TensorOptions options) {
return wrapper_CompositeImplicitAutograd_crow_col_value_sparse_csr_tensor(crow_indices, col_indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeImplicitAutograd_crow_col_value_sparse_csr_tensor(crow_indices, col_indices, values, dtype, layout, device, pin_memory);
}
at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::TensorOptions options) {
return wrapper_CompositeImplicitAutograd_ccol_row_value_sparse_csc_tensor(ccol_indices, row_indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeImplicitAutograd_ccol_row_value_sparse_csc_tensor(ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
}
at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::TensorOptions options) {
return wrapper_CompositeImplicitAutograd_crow_col_value_sparse_bsr_tensor(crow_indices, col_indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeImplicitAutograd_crow_col_value_sparse_bsr_tensor(crow_indices, col_indices, values, dtype, layout, device, pin_memory);
}
at::Tensor sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::TensorOptions options) {
return wrapper_CompositeImplicitAutograd_ccol_row_value_sparse_bsc_tensor(ccol_indices, row_indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeImplicitAutograd_ccol_row_value_sparse_bsc_tensor(ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
}
// `_unsafe` compressed-sparse factories ("_unsafe" is part of the generated
// op name; presumably these skip argument validation -- confirm against the
// kernel definitions). Same TensorOptions-unpacking pattern as above.
at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
return wrapper_CompositeImplicitAutograd___sparse_compressed_tensor_unsafe(compressed_indices, plain_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeImplicitAutograd___sparse_compressed_tensor_unsafe(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
}
at::Tensor _sparse_csr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
return wrapper_CompositeImplicitAutograd___sparse_csr_tensor_unsafe(crow_indices, col_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor _sparse_csr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeImplicitAutograd___sparse_csr_tensor_unsafe(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
}
at::Tensor _sparse_csc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
return wrapper_CompositeImplicitAutograd___sparse_csc_tensor_unsafe(ccol_indices, row_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor _sparse_csc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeImplicitAutograd___sparse_csc_tensor_unsafe(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
}
at::Tensor _sparse_bsr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
return wrapper_CompositeImplicitAutograd___sparse_bsr_tensor_unsafe(crow_indices, col_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor _sparse_bsr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeImplicitAutograd___sparse_bsr_tensor_unsafe(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
}
at::Tensor _sparse_bsc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
return wrapper_CompositeImplicitAutograd___sparse_bsc_tensor_unsafe(ccol_indices, row_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor _sparse_bsc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeImplicitAutograd___sparse_bsc_tensor_unsafe(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
}
// COO factories. The `_unsafe` IntArrayRef overloads convert `size` to
// SymIntArrayRef via c10::fromIntArrayRefSlow before forwarding, so the
// int and symint entry points share one wrapper; the `_symint` overloads
// pass their SymIntArrayRef through directly.
at::Tensor sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options) {
return wrapper_CompositeImplicitAutograd_indices_sparse_coo_tensor(indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeImplicitAutograd_indices_sparse_coo_tensor(indices, values, dtype, layout, device, pin_memory);
}
at::Tensor sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
return wrapper_CompositeImplicitAutograd_indices_size_sparse_coo_tensor(indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeImplicitAutograd_indices_size_sparse_coo_tensor(indices, values, size, dtype, layout, device, pin_memory);
}
at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
return wrapper_CompositeImplicitAutograd___sparse_coo_tensor_unsafe(indices, values, c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeImplicitAutograd___sparse_coo_tensor_unsafe(indices, values, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
}
at::Tensor _sparse_coo_tensor_unsafe_symint(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options) {
return wrapper_CompositeImplicitAutograd___sparse_coo_tensor_unsafe(indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor _sparse_coo_tensor_unsafe_symint(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
return wrapper_CompositeImplicitAutograd___sparse_coo_tensor_unsafe(indices, values, size, dtype, layout, device, pin_memory);
}
// Sparse-argument validators: void pass-throughs to the generated wrappers
// (the `return` on a void call is valid C++ and is how the codegen emits it).
void _validate_sparse_coo_tensor_args(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size) {
return wrapper_CompositeImplicitAutograd___validate_sparse_coo_tensor_args(indices, values, size);
}
void _validate_sparse_compressed_tensor_args(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::Layout layout) {
return wrapper_CompositeImplicitAutograd___validate_sparse_compressed_tensor_args(compressed_indices, plain_indices, values, size, layout);
}
void _validate_sparse_csr_tensor_args(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) {
return wrapper_CompositeImplicitAutograd___validate_sparse_csr_tensor_args(crow_indices, col_indices, values, size);
}
void _validate_sparse_csc_tensor_args(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {
return wrapper_CompositeImplicitAutograd___validate_sparse_csc_tensor_args(ccol_indices, row_indices, values, size);
}
void _validate_sparse_bsr_tensor_args(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) {
return wrapper_CompositeImplicitAutograd___validate_sparse_bsr_tensor_args(crow_indices, col_indices, values, size);
}
void _validate_sparse_bsc_tensor_args(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {
return wrapper_CompositeImplicitAutograd___validate_sparse_bsc_tensor_args(ccol_indices, row_indices, values, size);
}
// Layout/device conversion helpers: straight pass-throughs to the generated
// wrappers (no argument transformation).
::std::vector<at::Tensor> _to_cpu(at::TensorList tensors) {
return wrapper_CompositeImplicitAutograd___to_cpu(tensors);
}
at::Tensor to_dense(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeImplicitAutograd__to_dense(self, dtype);
}
at::Tensor to_dense_backward(const at::Tensor & grad, const at::Tensor & input) {
return wrapper_CompositeImplicitAutograd__to_dense_backward(grad, input);
}
at::Tensor coalesce(const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd__coalesce(self);
}
::std::vector<at::Tensor> unbind(const at::Tensor & self, at::Dimname dim) {
return wrapper_CompositeImplicitAutograd_Dimname_unbind(self, dim);
}
at::Tensor to_mkldnn_backward(const at::Tensor & grad, const at::Tensor & input) {
return wrapper_CompositeImplicitAutograd__to_mkldnn_backward(grad, input);
}
// Fake-quantization / qparams helpers: pass-throughs to the generated
// wrappers. fused_moving_avg_obs_fake_quant takes several Tensor& in-out
// arguments (running_min/max, scale, zero_point) that the wrapper may mutate.
at::Tensor fake_quantize_per_tensor_affine(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
return wrapper_CompositeImplicitAutograd__fake_quantize_per_tensor_affine(self, scale, zero_point, quant_min, quant_max);
}
at::Tensor fake_quantize_per_tensor_affine(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max) {
return wrapper_CompositeImplicitAutograd_tensor_qparams_fake_quantize_per_tensor_affine(self, scale, zero_point, quant_min, quant_max);
}
at::Tensor fake_quantize_per_tensor_affine_cachemask_backward(const at::Tensor & grad, const at::Tensor & mask) {
return wrapper_CompositeImplicitAutograd__fake_quantize_per_tensor_affine_cachemask_backward(grad, mask);
}
at::Tensor fake_quantize_per_channel_affine(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
return wrapper_CompositeImplicitAutograd__fake_quantize_per_channel_affine(self, scale, zero_point, axis, quant_min, quant_max);
}
at::Tensor fake_quantize_per_channel_affine_cachemask_backward(const at::Tensor & grad, const at::Tensor & mask) {
return wrapper_CompositeImplicitAutograd__fake_quantize_per_channel_affine_cachemask_backward(grad, mask);
}
at::Tensor fused_moving_avg_obs_fake_quant(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) {
return wrapper_CompositeImplicitAutograd__fused_moving_avg_obs_fake_quant(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
}
::std::tuple<double,int64_t> _choose_qparams_per_tensor(const at::Tensor & self, bool reduce_range) {
return wrapper_CompositeImplicitAutograd___choose_qparams_per_tensor(self, reduce_range);
}
at::Tensor _saturate_weight_to_fp16(const at::Tensor & weight) {
return wrapper_CompositeImplicitAutograd___saturate_weight_to_fp16(weight);
}
::std::tuple<at::Tensor,at::Tensor> choose_qparams_optimized(const at::Tensor & input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width) {
return wrapper_CompositeImplicitAutograd__choose_qparams_optimized(input, numel, n_bins, ratio, bit_width);
}
10594 | at::Tensor _autocast_to_reduced_precision(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled, at::ScalarType cuda_dtype, at::ScalarType cpu_dtype) { |
10595 | return wrapper_CompositeImplicitAutograd___autocast_to_reduced_precision(self, cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype); |
10596 | } |
10597 | at::Tensor _autocast_to_full_precision(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled) { |
10598 | return wrapper_CompositeImplicitAutograd___autocast_to_full_precision(self, cuda_enabled, cpu_enabled); |
10599 | } |
// ---- at::to(...) overload family ----
// Five public overloads; all forward to CompositeImplicitAutograd kernels.
// The TensorOptions overload is the only one that does real work in the stub:
// it unpacks the bundled options into the dtype/layout/device/pin_memory
// fields the kernel expects.
at::Tensor to(const at::Tensor & self, at::TensorOptions options, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
    // optTypeMetaToScalarType: TensorOptions stores dtype as a TypeMeta, the
    // kernel takes optional<ScalarType>. check_tensor_options_and_extract_memory_format
    // reconciles a memory_format possibly present in BOTH `options` and the
    // explicit argument into a single value.
    return wrapper_CompositeImplicitAutograd_dtype_layout_to(self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), non_blocking, copy, c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
at::Tensor to(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
    return wrapper_CompositeImplicitAutograd_dtype_layout_to(self, dtype, layout, device, pin_memory, non_blocking, copy, memory_format);
}
at::Tensor to(const at::Tensor & self, at::Device device, at::ScalarType dtype, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
    return wrapper_CompositeImplicitAutograd_device_to(self, device, dtype, non_blocking, copy, memory_format);
}
at::Tensor to(const at::Tensor & self, at::ScalarType dtype, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
    return wrapper_CompositeImplicitAutograd_dtype_to(self, dtype, non_blocking, copy, memory_format);
}
at::Tensor to(const at::Tensor & self, const at::Tensor & other, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
    return wrapper_CompositeImplicitAutograd_other_to(self, other, non_blocking, copy, memory_format);
}
// ---- meshgrid / cartesian_prod / combinations / item stubs ----
// Pure pass-through redispatch; overload selection (e.g. meshgrid with vs.
// without `indexing`) is encoded in the wrapper name suffix.
::std::vector<at::Tensor> meshgrid(at::TensorList tensors) {
    return wrapper_CompositeImplicitAutograd__meshgrid(tensors);
}
::std::vector<at::Tensor> meshgrid(at::TensorList tensors, c10::string_view indexing) {
    return wrapper_CompositeImplicitAutograd_indexing_meshgrid(tensors, indexing);
}
at::Tensor cartesian_prod(at::TensorList tensors) {
    return wrapper_CompositeImplicitAutograd__cartesian_prod(tensors);
}
at::Tensor combinations(const at::Tensor & self, int64_t r, bool with_replacement) {
    return wrapper_CompositeImplicitAutograd__combinations(self, r, with_replacement);
}
at::Scalar item(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__item(self);
}
// ---- Type-promotion query stubs ----
// Four result_type overloads (Tensor/Tensor, Tensor/Scalar, Scalar/Tensor,
// Scalar/Scalar) plus can_cast and promote_types; each forwards verbatim.
at::ScalarType result_type(const at::Tensor & tensor, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor_result_type(tensor, other);
}
at::ScalarType result_type(const at::Tensor & tensor, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_result_type(tensor, other);
}
at::ScalarType result_type(const at::Scalar & scalar, const at::Tensor & tensor) {
    return wrapper_CompositeImplicitAutograd_Scalar_Tensor_result_type(scalar, tensor);
}
at::ScalarType result_type(const at::Scalar & scalar1, const at::Scalar & scalar2) {
    return wrapper_CompositeImplicitAutograd_Scalar_Scalar_result_type(scalar1, scalar2);
}
bool can_cast(at::ScalarType from, at::ScalarType to) {
    return wrapper_CompositeImplicitAutograd__can_cast(from, to);
}
at::ScalarType promote_types(at::ScalarType type1, at::ScalarType type2) {
    return wrapper_CompositeImplicitAutograd__promote_types(type1, type2);
}
// ---- Recurrent-network stubs (LSTM / GRU / RNN, cells, packed sequences) ----
// Overload pairs named *_input_* vs *_data_* distinguish the padded-input
// form (batch_first flag) from the PackedSequence form (explicit batch_sizes
// tensor, no batch_first). All stubs forward verbatim.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
    return wrapper_CompositeImplicitAutograd___thnn_fused_lstm_cell_backward(grad_hy, grad_cy, cx, cy, workspace, has_bias);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_lstm_cell_backward(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, const at::Tensor & cx, const at::Tensor & cy) {
    return wrapper_CompositeImplicitAutograd___thnn_differentiable_lstm_cell_backward(grad_hy, grad_cy, input_gates, hidden_gates, input_bias, hidden_bias, cx, cy);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_gru_cell_backward(const at::Tensor & grad_hy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias) {
    return wrapper_CompositeImplicitAutograd___thnn_differentiable_gru_cell_backward(grad_hy, input_gates, hidden_gates, hx, input_bias, hidden_bias);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
    return wrapper_CompositeImplicitAutograd_input_lstm(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm(const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
    return wrapper_CompositeImplicitAutograd_data_lstm(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
}
::std::tuple<at::Tensor,at::Tensor> gru(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
    return wrapper_CompositeImplicitAutograd_input_gru(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}
::std::tuple<at::Tensor,at::Tensor> gru(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
    return wrapper_CompositeImplicitAutograd_data_gru(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
}
::std::tuple<at::Tensor,at::Tensor> rnn_tanh(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
    return wrapper_CompositeImplicitAutograd_input_rnn_tanh(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}
::std::tuple<at::Tensor,at::Tensor> rnn_tanh(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
    return wrapper_CompositeImplicitAutograd_data_rnn_tanh(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
}
::std::tuple<at::Tensor,at::Tensor> rnn_relu(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
    return wrapper_CompositeImplicitAutograd_input_rnn_relu(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}
::std::tuple<at::Tensor,at::Tensor> rnn_relu(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
    return wrapper_CompositeImplicitAutograd_data_rnn_relu(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
}
// Single-step cell variants (optional biases b_ih / b_hh).
::std::tuple<at::Tensor,at::Tensor> lstm_cell(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
    return wrapper_CompositeImplicitAutograd__lstm_cell(input, hx, w_ih, w_hh, b_ih, b_hh);
}
at::Tensor gru_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
    return wrapper_CompositeImplicitAutograd__gru_cell(input, hx, w_ih, w_hh, b_ih, b_hh);
}
at::Tensor rnn_tanh_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
    return wrapper_CompositeImplicitAutograd__rnn_tanh_cell(input, hx, w_ih, w_hh, b_ih, b_hh);
}
at::Tensor rnn_relu_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
    return wrapper_CompositeImplicitAutograd__rnn_relu_cell(input, hx, w_ih, w_hh, b_ih, b_hh);
}
// Quantized cell variants: take pre-packed weights plus per-gate
// scale/zero-point Scalars; biases are required (non-optional) here.
::std::tuple<at::Tensor,at::Tensor> quantized_lstm_cell(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
    return wrapper_CompositeImplicitAutograd__quantized_lstm_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}
at::Tensor quantized_gru_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
    return wrapper_CompositeImplicitAutograd__quantized_gru_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}
at::Tensor quantized_rnn_relu_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
    return wrapper_CompositeImplicitAutograd__quantized_rnn_relu_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}
at::Tensor quantized_rnn_tanh_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
    return wrapper_CompositeImplicitAutograd__quantized_rnn_tanh_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}
// IntArrayRef/SymInt pair: the int64_t overload converts eagerly via
// fromIntArrayRefSlow; the _symint overload forwards symbolic sizes as-is.
// Both reach the SAME underlying wrapper.
at::Tensor _pack_padded_sequence_backward(const at::Tensor & grad, at::IntArrayRef input_size, const at::Tensor & batch_sizes, bool batch_first) {
    return wrapper_CompositeImplicitAutograd___pack_padded_sequence_backward(grad, c10::fromIntArrayRefSlow(input_size), batch_sizes, batch_first);
}
at::Tensor _pack_padded_sequence_backward_symint(const at::Tensor & grad, c10::SymIntArrayRef input_size, const at::Tensor & batch_sizes, bool batch_first) {
    return wrapper_CompositeImplicitAutograd___pack_padded_sequence_backward(grad, input_size, batch_sizes, batch_first);
}
::std::tuple<at::Tensor,at::Tensor> _pad_packed_sequence(const at::Tensor & data, const at::Tensor & batch_sizes, bool batch_first, const at::Scalar & padding_value, int64_t total_length) {
    return wrapper_CompositeImplicitAutograd___pad_packed_sequence(data, batch_sizes, batch_first, padding_value, total_length);
}
// ---- set_ and Dimname-overload index/scatter stubs ----
// set_ has the usual IntArrayRef→SymInt slow-conversion pair; the rest are
// the at::Dimname overloads of index_add / index_fill / scatter /
// scatter_add, all forwarding verbatim (trailing underscore = in-place).
at::Tensor & set_(at::Tensor & self, const at::Tensor & source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride) {
    return wrapper_CompositeImplicitAutograd_source_Tensor_storage_offset_set_(self, source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
}
at::Tensor & set__symint(at::Tensor & self, const at::Tensor & source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
    return wrapper_CompositeImplicitAutograd_source_Tensor_storage_offset_set_(self, source, storage_offset, size, stride);
}
at::Tensor index_add(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
    return wrapper_CompositeImplicitAutograd_dimname_index_add(self, dim, index, source, alpha);
}
at::Tensor & index_fill_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
    return wrapper_CompositeImplicitAutograd_Dimname_Scalar_index_fill_(self, dim, index, value);
}
at::Tensor index_fill(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
    return wrapper_CompositeImplicitAutograd_Dimname_Scalar_index_fill(self, dim, index, value);
}
at::Tensor & index_fill_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) {
    return wrapper_CompositeImplicitAutograd_Dimname_Tensor_index_fill_(self, dim, index, value);
}
at::Tensor index_fill(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) {
    return wrapper_CompositeImplicitAutograd_Dimname_Tensor_index_fill(self, dim, index, value);
}
at::Tensor scatter(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) {
    return wrapper_CompositeImplicitAutograd_dimname_src_scatter(self, dim, index, src);
}
at::Tensor scatter(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
    return wrapper_CompositeImplicitAutograd_dimname_value_scatter(self, dim, index, value);
}
at::Tensor scatter_add(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) {
    return wrapper_CompositeImplicitAutograd_dimname_scatter_add(self, dim, index, src);
}
// ---- Bitwise-op stubs: bitwise_{and,or,xor} and the __and__/__or__/__xor__
// (plus in-place __iand__/__ior__/__ixor__) operator aliases ----
// Scalar and Tensor overloads of each; all forward verbatim to the
// corresponding CompositeImplicitAutograd kernels.
at::Tensor & bitwise_and_(at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_bitwise_and_(self, other);
}
at::Tensor __and__(const at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar___and__(self, other);
}
at::Tensor & __iand__(at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar___iand__(self, other);
}
at::Tensor __and__(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor___and__(self, other);
}
at::Tensor & __iand__(at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor___iand__(self, other);
}
at::Tensor bitwise_or(const at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_bitwise_or(self, other);
}
at::Tensor & bitwise_or_(at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_bitwise_or_(self, other);
}
at::Tensor __or__(const at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar___or__(self, other);
}
at::Tensor & __ior__(at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar___ior__(self, other);
}
at::Tensor __or__(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor___or__(self, other);
}
at::Tensor & __ior__(at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor___ior__(self, other);
}
at::Tensor bitwise_xor(const at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_bitwise_xor(self, other);
}
at::Tensor & bitwise_xor_(at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_bitwise_xor_(self, other);
}
at::Tensor __xor__(const at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar___xor__(self, other);
}
at::Tensor & __ixor__(at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar___ixor__(self, other);
}
at::Tensor __xor__(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor___xor__(self, other);
}
at::Tensor & __ixor__(at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor___ixor__(self, other);
}
// ---- diag / cross / trace_backward stubs ----
// For each op the _out() form takes `out` as the FIRST parameter and the
// _outf() form takes it LAST; both forward to the same *_out kernel with
// `out` moved to the trailing position.
at::Tensor diag(const at::Tensor & self, int64_t diagonal) {
    return wrapper_CompositeImplicitAutograd__diag(self, diagonal);
}
at::Tensor & diag_out(at::Tensor & out, const at::Tensor & self, int64_t diagonal) {
    return wrapper_CompositeImplicitAutograd_out_diag_out(self, diagonal, out);
}
at::Tensor & diag_outf(const at::Tensor & self, int64_t diagonal, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_diag_out(self, diagonal, out);
}
at::Tensor cross(const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim) {
    return wrapper_CompositeImplicitAutograd__cross(self, other, dim);
}
at::Tensor & cross_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim) {
    return wrapper_CompositeImplicitAutograd_out_cross_out(self, other, dim, out);
}
at::Tensor & cross_outf(const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_cross_out(self, other, dim, out);
}
// trace_backward: IntArrayRef overload converts sizes to SymInt eagerly;
// the _symint overload forwards symbolic sizes directly to the same kernel.
at::Tensor trace_backward(const at::Tensor & grad, at::IntArrayRef sizes) {
    return wrapper_CompositeImplicitAutograd__trace_backward(grad, c10::fromIntArrayRefSlow(sizes));
}
at::Tensor trace_backward_symint(const at::Tensor & grad, c10::SymIntArrayRef sizes) {
    return wrapper_CompositeImplicitAutograd__trace_backward(grad, sizes);
}
// ---- Comparison alias stubs ----
// Verbose spellings of the comparison ops (not_equal, greater_equal,
// less_equal, greater, less). Each op appears in 8 forms: {Scalar, Tensor}
// RHS x {functional, out, outf, in-place}. out()/outf() differ only in the
// position of `out` in the public signature; both forward to the same kernel.
at::Tensor not_equal(const at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_not_equal(self, other);
}
at::Tensor & not_equal_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_out_not_equal_out(self, other, out);
}
at::Tensor & not_equal_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_Scalar_out_not_equal_out(self, other, out);
}
at::Tensor & not_equal_(at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_not_equal_(self, other);
}
at::Tensor not_equal(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor_not_equal(self, other);
}
at::Tensor & not_equal_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor_out_not_equal_out(self, other, out);
}
at::Tensor & not_equal_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_Tensor_out_not_equal_out(self, other, out);
}
at::Tensor & not_equal_(at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor_not_equal_(self, other);
}
at::Tensor greater_equal(const at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_greater_equal(self, other);
}
at::Tensor & greater_equal_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_out_greater_equal_out(self, other, out);
}
at::Tensor & greater_equal_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_Scalar_out_greater_equal_out(self, other, out);
}
at::Tensor & greater_equal_(at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_greater_equal_(self, other);
}
at::Tensor greater_equal(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor_greater_equal(self, other);
}
at::Tensor & greater_equal_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor_out_greater_equal_out(self, other, out);
}
at::Tensor & greater_equal_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_Tensor_out_greater_equal_out(self, other, out);
}
at::Tensor & greater_equal_(at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor_greater_equal_(self, other);
}
at::Tensor less_equal(const at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_less_equal(self, other);
}
at::Tensor & less_equal_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_out_less_equal_out(self, other, out);
}
at::Tensor & less_equal_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_Scalar_out_less_equal_out(self, other, out);
}
at::Tensor & less_equal_(at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_less_equal_(self, other);
}
at::Tensor less_equal(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor_less_equal(self, other);
}
at::Tensor & less_equal_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor_out_less_equal_out(self, other, out);
}
at::Tensor & less_equal_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_Tensor_out_less_equal_out(self, other, out);
}
at::Tensor & less_equal_(at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor_less_equal_(self, other);
}
at::Tensor greater(const at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_greater(self, other);
}
at::Tensor & greater_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_out_greater_out(self, other, out);
}
at::Tensor & greater_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_Scalar_out_greater_out(self, other, out);
}
at::Tensor & greater_(at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_greater_(self, other);
}
at::Tensor greater(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor_greater(self, other);
}
at::Tensor & greater_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor_out_greater_out(self, other, out);
}
at::Tensor & greater_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_Tensor_out_greater_out(self, other, out);
}
at::Tensor & greater_(at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor_greater_(self, other);
}
at::Tensor less(const at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_less(self, other);
}
at::Tensor & less_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_out_less_out(self, other, out);
}
at::Tensor & less_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_Scalar_out_less_out(self, other, out);
}
at::Tensor & less_(at::Tensor & self, const at::Scalar & other) {
    return wrapper_CompositeImplicitAutograd_Scalar_less_(self, other);
}
at::Tensor less(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor_less(self, other);
}
at::Tensor & less_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor_out_less_out(self, other, out);
}
at::Tensor & less_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_Tensor_out_less_out(self, other, out);
}
at::Tensor & less_(at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_Tensor_less_(self, other);
}
10939 | at::Tensor take_along_dim(const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim) { |
10940 | return wrapper_CompositeImplicitAutograd__take_along_dim(self, indices, dim); |
10941 | } |
10942 | at::Tensor & take_along_dim_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim) { |
10943 | return wrapper_CompositeImplicitAutograd_out_take_along_dim_out(self, indices, dim, out); |
10944 | } |
10945 | at::Tensor & take_along_dim_outf(const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim, at::Tensor & out) { |
10946 | return wrapper_CompositeImplicitAutograd_out_take_along_dim_out(self, indices, dim, out); |
10947 | } |
10948 | at::Tensor index_select(const at::Tensor & self, at::Dimname dim, const at::Tensor & index) { |
10949 | return wrapper_CompositeImplicitAutograd_dimname_index_select(self, dim, index); |
10950 | } |
10951 | at::Tensor & index_select_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, const at::Tensor & index) { |
10952 | return wrapper_CompositeImplicitAutograd_dimname_out_index_select_out(self, dim, index, out); |
10953 | } |
// Public dispatcher entry points for index/gather-style selection ops.
// Each function is a pure forwarder to a file-local
// wrapper_CompositeImplicitAutograd_* kernel (defined earlier in this
// generated translation unit — not visible here); no logic lives in these
// shims beyond argument reordering and int->SymInt widening.
// Convention: `_out` takes the output tensor(s) as leading parameters,
// `_outf` takes them trailing; both call the same underlying wrapper.
at::Tensor & index_select_outf(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_dimname_out_index_select_out(self, dim, index, out);
}
// IntArrayRef overload widens `self_sizes` to SymInt via fromIntArrayRefSlow
// so that it shares the SymInt-based wrapper with the `_symint` variant below.
at::Tensor index_select_backward(const at::Tensor & grad, at::IntArrayRef self_sizes, int64_t dim, const at::Tensor & index) {
    return wrapper_CompositeImplicitAutograd__index_select_backward(grad, c10::fromIntArrayRefSlow(self_sizes), dim, index);
}
at::Tensor index_select_backward_symint(const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index) {
    return wrapper_CompositeImplicitAutograd__index_select_backward(grad, self_sizes, dim, index);
}
at::Tensor masked_select_backward(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & mask) {
    return wrapper_CompositeImplicitAutograd__masked_select_backward(grad, input, mask);
}
::std::vector<at::Tensor> nonzero_numpy(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__nonzero_numpy(self);
}
at::Tensor argwhere(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__argwhere(self);
}
at::Tensor gather_backward(const at::Tensor & grad, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
    return wrapper_CompositeImplicitAutograd__gather_backward(grad, self, dim, index, sparse_grad);
}
at::Tensor gather(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad) {
    return wrapper_CompositeImplicitAutograd_dimname_gather(self, dim, index, sparse_grad);
}
at::Tensor & gather_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad) {
    return wrapper_CompositeImplicitAutograd_dimname_out_gather_out(self, dim, index, sparse_grad, out);
}
at::Tensor & gather_outf(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_dimname_out_gather_out(self, dim, index, sparse_grad, out);
}
at::Tensor _gather_sparse_backward(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & grad) {
    return wrapper_CompositeImplicitAutograd___gather_sparse_backward(self, dim, index, grad);
}
10986 | } |
// Entry points for cross_entropy_loss, linalg_vander, and the svd family.
// Pure forwarders to file-local wrapper_CompositeImplicitAutograd_* kernels.
at::Tensor cross_entropy_loss(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, double label_smoothing) {
    return wrapper_CompositeImplicitAutograd__cross_entropy_loss(self, target, weight, reduction, ignore_index, label_smoothing);
}
// SymInt variant: `ignore_index` here is a c10::SymInt; both overloads feed
// the same wrapper (the int64_t above converts implicitly at the call).
at::Tensor cross_entropy_loss_symint(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, double label_smoothing) {
    return wrapper_CompositeImplicitAutograd__cross_entropy_loss(self, target, weight, reduction, ignore_index, label_smoothing);
}
at::Tensor linalg_vander(const at::Tensor & x, c10::optional<int64_t> N) {
    return wrapper_CompositeImplicitAutograd__linalg_vander(x, N);
}
at::Tensor,at::Tensor,at::Tensor> is returned as (U, S, V).
::std::tuple<at::Tensor,at::Tensor,at::Tensor> svd(const at::Tensor & self, bool some, bool compute_uv) {
    return wrapper_CompositeImplicitAutograd__svd(self, some, compute_uv);
}
// `_out` places (U, S, V) first; `_outf` places them last; same wrapper.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> svd_out(at::Tensor & U, at::Tensor & S, at::Tensor & V, const at::Tensor & self, bool some, bool compute_uv) {
    return wrapper_CompositeImplicitAutograd_U_svd_out(self, some, compute_uv, U, S, V);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> svd_outf(const at::Tensor & self, bool some, bool compute_uv, at::Tensor & U, at::Tensor & S, at::Tensor & V) {
    return wrapper_CompositeImplicitAutograd_U_svd_out(self, some, compute_uv, U, S, V);
}
// Entry points for swapaxes/swapdims (NumPy-style aliases), qr, orgqr,
// _lu_with_info and lu_solve. All are pure forwarders to file-local
// wrapper_CompositeImplicitAutograd_* kernels; trailing-underscore names
// are the in-place variants, `_out`/`_outf` differ only in out-arg position.
at::Tensor swapaxes(const at::Tensor & self, int64_t axis0, int64_t axis1) {
    return wrapper_CompositeImplicitAutograd__swapaxes(self, axis0, axis1);
}
at::Tensor & swapaxes_(at::Tensor & self, int64_t axis0, int64_t axis1) {
    return wrapper_CompositeImplicitAutograd__swapaxes_(self, axis0, axis1);
}
at::Tensor swapdims(const at::Tensor & self, int64_t dim0, int64_t dim1) {
    return wrapper_CompositeImplicitAutograd__swapdims(self, dim0, dim1);
}
at::Tensor & swapdims_(at::Tensor & self, int64_t dim0, int64_t dim1) {
    return wrapper_CompositeImplicitAutograd__swapdims_(self, dim0, dim1);
}
::std::tuple<at::Tensor,at::Tensor> qr(const at::Tensor & self, bool some) {
    return wrapper_CompositeImplicitAutograd__qr(self, some);
}
::std::tuple<at::Tensor &,at::Tensor &> qr_out(at::Tensor & Q, at::Tensor & R, const at::Tensor & self, bool some) {
    return wrapper_CompositeImplicitAutograd_Q_qr_out(self, some, Q, R);
}
::std::tuple<at::Tensor &,at::Tensor &> qr_outf(const at::Tensor & self, bool some, at::Tensor & Q, at::Tensor & R) {
    return wrapper_CompositeImplicitAutograd_Q_qr_out(self, some, Q, R);
}
at::Tensor orgqr(const at::Tensor & self, const at::Tensor & input2) {
    return wrapper_CompositeImplicitAutograd__orgqr(self, input2);
}
at::Tensor & orgqr_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & input2) {
    return wrapper_CompositeImplicitAutograd_out_orgqr_out(self, input2, out);
}
at::Tensor & orgqr_outf(const at::Tensor & self, const at::Tensor & input2, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_orgqr_out(self, input2, out);
}
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _lu_with_info(const at::Tensor & self, bool pivot, bool check_errors) {
    return wrapper_CompositeImplicitAutograd___lu_with_info(self, pivot, check_errors);
}
at::Tensor lu_solve(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) {
    return wrapper_CompositeImplicitAutograd__lu_solve(self, LU_data, LU_pivots);
}
at::Tensor & lu_solve_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) {
    return wrapper_CompositeImplicitAutograd_out_lu_solve_out(self, LU_data, LU_pivots, out);
}
at::Tensor & lu_solve_outf(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_lu_solve_out(self, LU_data, LU_pivots, out);
}
// Entry points for arctan2 (alias family) and the three histogramdd
// overloads (bin-count spec as IntArrayRef, single int64_t, or TensorList
// of explicit bin edges). Pure forwarders to file-local
// wrapper_CompositeImplicitAutograd_* kernels.
at::Tensor arctan2(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd__arctan2(self, other);
}
at::Tensor & arctan2_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_out_arctan2_out(self, other, out);
}
at::Tensor & arctan2_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_arctan2_out(self, other, out);
}
at::Tensor & arctan2_(at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd__arctan2_(self, other);
}
::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
    return wrapper_CompositeImplicitAutograd__histogramdd(self, bins, range, weight, density);
}
::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd(const at::Tensor & self, int64_t bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
    return wrapper_CompositeImplicitAutograd_int_bins_histogramdd(self, bins, range, weight, density);
}
::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd(const at::Tensor & self, at::TensorList bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
    return wrapper_CompositeImplicitAutograd_TensorList_bins_histogramdd(self, bins, range, weight, density);
}
// Entry points for the binary (two-tensor) max/min overloads and the
// quantile / nanquantile families (Tensor-q and scalar-double-q overloads,
// each with `_out`/`_outf`). Pure forwarders to file-local
// wrapper_CompositeImplicitAutograd_* kernels; `_out` and `_outf` differ
// only in where the `out` parameter sits.
at::Tensor max(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_other_max(self, other);
}
at::Tensor & max_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_out_max_out(self, other, out);
}
at::Tensor & max_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_max_out(self, other, out);
}
at::Tensor min(const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_other_min(self, other);
}
at::Tensor & min_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
    return wrapper_CompositeImplicitAutograd_out_min_out(self, other, out);
}
at::Tensor & min_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_min_out(self, other, out);
}
// quantile: `q` given as a tensor of probabilities ...
at::Tensor quantile(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
    return wrapper_CompositeImplicitAutograd__quantile(self, q, dim, keepdim, interpolation);
}
at::Tensor & quantile_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
    return wrapper_CompositeImplicitAutograd_out_quantile_out(self, q, dim, keepdim, interpolation, out);
}
at::Tensor & quantile_outf(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_quantile_out(self, q, dim, keepdim, interpolation, out);
}
// ... and as a single double (the `scalar` wrapper variants).
at::Tensor quantile(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
    return wrapper_CompositeImplicitAutograd_scalar_quantile(self, q, dim, keepdim, interpolation);
}
at::Tensor & quantile_out(at::Tensor & out, const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
    return wrapper_CompositeImplicitAutograd_scalar_out_quantile_out(self, q, dim, keepdim, interpolation, out);
}
at::Tensor & quantile_outf(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_scalar_out_quantile_out(self, q, dim, keepdim, interpolation, out);
}
at::Tensor nanquantile(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
    return wrapper_CompositeImplicitAutograd__nanquantile(self, q, dim, keepdim, interpolation);
}
at::Tensor & nanquantile_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
    return wrapper_CompositeImplicitAutograd_out_nanquantile_out(self, q, dim, keepdim, interpolation, out);
}
at::Tensor & nanquantile_outf(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_nanquantile_out(self, q, dim, keepdim, interpolation, out);
}
at::Tensor nanquantile(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
    return wrapper_CompositeImplicitAutograd_scalar_nanquantile(self, q, dim, keepdim, interpolation);
}
at::Tensor & nanquantile_out(at::Tensor & out, const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
    return wrapper_CompositeImplicitAutograd_scalar_out_nanquantile_out(self, q, dim, keepdim, interpolation, out);
}
at::Tensor & nanquantile_outf(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_scalar_out_nanquantile_out(self, q, dim, keepdim, interpolation, out);
}
// Entry points for the Dimname sort overloads (with and without the
// optional `stable` flag), msort, and argsort. Pure forwarders to
// file-local wrapper_CompositeImplicitAutograd_* kernels. The sort tuples
// are returned as (values, indices).
::std::tuple<at::Tensor,at::Tensor> sort(const at::Tensor & self, at::Dimname dim, bool descending) {
    return wrapper_CompositeImplicitAutograd_dimname_sort(self, dim, descending);
}
::std::tuple<at::Tensor &,at::Tensor &> sort_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim, bool descending) {
    return wrapper_CompositeImplicitAutograd_dimname_values_sort_out(self, dim, descending, values, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> sort_outf(const at::Tensor & self, at::Dimname dim, bool descending, at::Tensor & values, at::Tensor & indices) {
    return wrapper_CompositeImplicitAutograd_dimname_values_sort_out(self, dim, descending, values, indices);
}
// `stable` overloads route to the distinct `_stable_` wrappers.
::std::tuple<at::Tensor,at::Tensor> sort(const at::Tensor & self, c10::optional<bool> stable, at::Dimname dim, bool descending) {
    return wrapper_CompositeImplicitAutograd_dimname_stable_sort(self, stable, dim, descending);
}
::std::tuple<at::Tensor &,at::Tensor &> sort_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, c10::optional<bool> stable, at::Dimname dim, bool descending) {
    return wrapper_CompositeImplicitAutograd_dimname_values_stable_sort_out(self, stable, dim, descending, values, indices);
}
::std::tuple<at::Tensor &,at::Tensor &> sort_outf(const at::Tensor & self, c10::optional<bool> stable, at::Dimname dim, bool descending, at::Tensor & values, at::Tensor & indices) {
    return wrapper_CompositeImplicitAutograd_dimname_values_stable_sort_out(self, stable, dim, descending, values, indices);
}
at::Tensor msort(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__msort(self);
}
at::Tensor & msort_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd_out_msort_out(self, out);
}
at::Tensor & msort_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_msort_out(self, out);
}
at::Tensor argsort(const at::Tensor & self, int64_t dim, bool descending) {
    return wrapper_CompositeImplicitAutograd__argsort(self, dim, descending);
}
at::Tensor argsort(const at::Tensor & self, at::Dimname dim, bool descending) {
    return wrapper_CompositeImplicitAutograd_dimname_argsort(self, dim, descending);
}
// Entry points for the float_power family: Tensor^Tensor, Scalar^Tensor and
// Tensor^Scalar overloads, each with `_out`/`_outf`, plus the two in-place
// `float_power_` variants. Pure forwarders to file-local
// wrapper_CompositeImplicitAutograd_* kernels.
at::Tensor float_power(const at::Tensor & self, const at::Tensor & exponent) {
    return wrapper_CompositeImplicitAutograd_Tensor_Tensor_float_power(self, exponent);
}
at::Tensor & float_power_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & exponent) {
    return wrapper_CompositeImplicitAutograd_Tensor_Tensor_out_float_power_out(self, exponent, out);
}
at::Tensor & float_power_outf(const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_Tensor_Tensor_out_float_power_out(self, exponent, out);
}
// NOTE: the in-place wrapper suffix names the *exponent* overload:
// `Tensor_float_power_` takes a Tensor exponent, `Scalar_float_power_`
// (further below) a Scalar exponent.
at::Tensor & float_power_(at::Tensor & self, const at::Tensor & exponent) {
    return wrapper_CompositeImplicitAutograd_Tensor_float_power_(self, exponent);
}
at::Tensor float_power(const at::Scalar & self, const at::Tensor & exponent) {
    return wrapper_CompositeImplicitAutograd_Scalar_float_power(self, exponent);
}
at::Tensor & float_power_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & exponent) {
    return wrapper_CompositeImplicitAutograd_Scalar_out_float_power_out(self, exponent, out);
}
at::Tensor & float_power_outf(const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_Scalar_out_float_power_out(self, exponent, out);
}
at::Tensor float_power(const at::Tensor & self, const at::Scalar & exponent) {
    return wrapper_CompositeImplicitAutograd_Tensor_Scalar_float_power(self, exponent);
}
at::Tensor & float_power_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & exponent) {
    return wrapper_CompositeImplicitAutograd_Tensor_Scalar_out_float_power_out(self, exponent, out);
}
at::Tensor & float_power_outf(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_Tensor_Scalar_out_float_power_out(self, exponent, out);
}
at::Tensor & float_power_(at::Tensor & self, const at::Scalar & exponent) {
    return wrapper_CompositeImplicitAutograd_Scalar_float_power_(self, exponent);
}
// Entry points for loss ops (l1, multilabel_margin, nll, nll_loss_nd,
// nll_loss2d) and log_sigmoid. Pure forwarders to file-local
// wrapper_CompositeImplicitAutograd_* kernels. For the nll variants,
// `ignore_index` comes either as int64_t or as c10::SymInt (`_symint`
// entry points); both feed the same wrapper.
at::Tensor l1_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
    return wrapper_CompositeImplicitAutograd__l1_loss(self, target, reduction);
}
at::Tensor multilabel_margin_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
    return wrapper_CompositeImplicitAutograd__multilabel_margin_loss(self, target, reduction);
}
at::Tensor & multilabel_margin_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
    return wrapper_CompositeImplicitAutograd_out_multilabel_margin_loss_out(self, target, reduction, out);
}
at::Tensor & multilabel_margin_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_multilabel_margin_loss_out(self, target, reduction, out);
}
at::Tensor nll_loss(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
    return wrapper_CompositeImplicitAutograd__nll_loss(self, target, weight, reduction, ignore_index);
}
at::Tensor nll_loss_symint(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
    return wrapper_CompositeImplicitAutograd__nll_loss(self, target, weight, reduction, ignore_index);
}
at::Tensor & nll_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
    return wrapper_CompositeImplicitAutograd_out_nll_loss_out(self, target, weight, reduction, ignore_index, out);
}
at::Tensor & nll_loss_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_nll_loss_out(self, target, weight, reduction, ignore_index, out);
}
at::Tensor & nll_loss_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
    return wrapper_CompositeImplicitAutograd_out_nll_loss_out(self, target, weight, reduction, ignore_index, out);
}
at::Tensor & nll_loss_symint_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_nll_loss_out(self, target, weight, reduction, ignore_index, out);
}
at::Tensor nll_loss_nd(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
    return wrapper_CompositeImplicitAutograd__nll_loss_nd(self, target, weight, reduction, ignore_index);
}
at::Tensor nll_loss_nd_symint(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
    return wrapper_CompositeImplicitAutograd__nll_loss_nd(self, target, weight, reduction, ignore_index);
}
at::Tensor nll_loss2d(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
    return wrapper_CompositeImplicitAutograd__nll_loss2d(self, target, weight, reduction, ignore_index);
}
at::Tensor nll_loss2d_symint(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
    return wrapper_CompositeImplicitAutograd__nll_loss2d(self, target, weight, reduction, ignore_index);
}
at::Tensor & nll_loss2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
    return wrapper_CompositeImplicitAutograd_out_nll_loss2d_out(self, target, weight, reduction, ignore_index, out);
}
at::Tensor & nll_loss2d_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_nll_loss2d_out(self, target, weight, reduction, ignore_index, out);
}
at::Tensor & nll_loss2d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
    return wrapper_CompositeImplicitAutograd_out_nll_loss2d_out(self, target, weight, reduction, ignore_index, out);
}
at::Tensor & nll_loss2d_symint_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_nll_loss2d_out(self, target, weight, reduction, ignore_index, out);
}
at::Tensor log_sigmoid(const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd__log_sigmoid(self);
}
at::Tensor & log_sigmoid_out(at::Tensor & out, const at::Tensor & self) {
    return wrapper_CompositeImplicitAutograd_out_log_sigmoid_out(self, out);
}
at::Tensor & log_sigmoid_outf(const at::Tensor & self, at::Tensor & out) {
    return wrapper_CompositeImplicitAutograd_out_log_sigmoid_out(self, out);
}
// Entry points for adaptive average pooling and padding. Each op has an
// IntArrayRef overload that widens its size/pad argument to SymInt via
// c10::fromIntArrayRefSlow, and a `_symint` overload that passes the
// SymIntArrayRef through directly; both call the same file-local wrapper.
at::Tensor adaptive_avg_pool2d(const at::Tensor & self, at::IntArrayRef output_size) {
    return wrapper_CompositeImplicitAutograd__adaptive_avg_pool2d(self, c10::fromIntArrayRefSlow(output_size));
}
at::Tensor adaptive_avg_pool2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size) {
    return wrapper_CompositeImplicitAutograd__adaptive_avg_pool2d(self, output_size);
}
at::Tensor adaptive_avg_pool3d(const at::Tensor & self, at::IntArrayRef output_size) {
    return wrapper_CompositeImplicitAutograd__adaptive_avg_pool3d(self, c10::fromIntArrayRefSlow(output_size));
}
at::Tensor adaptive_avg_pool3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size) {
    return wrapper_CompositeImplicitAutograd__adaptive_avg_pool3d(self, output_size);
}
at::Tensor _pad_circular(const at::Tensor & self, at::IntArrayRef pad) {
    return wrapper_CompositeImplicitAutograd___pad_circular(self, c10::fromIntArrayRefSlow(pad));
}
at::Tensor _pad_circular_symint(const at::Tensor & self, c10::SymIntArrayRef pad) {
    return wrapper_CompositeImplicitAutograd___pad_circular(self, pad);
}
at::Tensor _pad_enum(const at::Tensor & self, at::IntArrayRef pad, int64_t mode, c10::optional<double> value) {
    return wrapper_CompositeImplicitAutograd___pad_enum(self, c10::fromIntArrayRefSlow(pad), mode, value);
}
at::Tensor _pad_enum_symint(const at::Tensor & self, c10::SymIntArrayRef pad, int64_t mode, c10::optional<double> value) {
    return wrapper_CompositeImplicitAutograd___pad_enum(self, pad, mode, value);
}
at::Tensor pad(const at::Tensor & self, at::IntArrayRef pad, c10::string_view mode, c10::optional<double> value) {
    return wrapper_CompositeImplicitAutograd__pad(self, c10::fromIntArrayRefSlow(pad), mode, value);
}
at::Tensor pad_symint(const at::Tensor & self, c10::SymIntArrayRef pad, c10::string_view mode, c10::optional<double> value) {
    return wrapper_CompositeImplicitAutograd__pad(self, pad, mode, value);
}
// Entry points for the `.vec` upsample overloads (linear, bilinear,
// trilinear, bicubic, their anti-aliased `_aa` variants, and the
// nearest / nearest-exact family). Each op's IntArrayRef overload widens
// the optional `output_size` to an optional SymIntArrayRef — mapping
// nullopt to nullopt, otherwise converting via c10::fromIntArrayRefSlow —
// before calling the same file-local wrapper as the `_symint` overload.
at::Tensor upsample_linear1d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
    return wrapper_CompositeImplicitAutograd_vec_upsample_linear1d(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors);
}
at::Tensor upsample_linear1d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
    return wrapper_CompositeImplicitAutograd_vec_upsample_linear1d(input, output_size, align_corners, scale_factors);
}
at::Tensor upsample_bilinear2d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
    return wrapper_CompositeImplicitAutograd_vec_upsample_bilinear2d(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors);
}
at::Tensor upsample_bilinear2d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
    return wrapper_CompositeImplicitAutograd_vec_upsample_bilinear2d(input, output_size, align_corners, scale_factors);
}
at::Tensor _upsample_bilinear2d_aa(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
    return wrapper_CompositeImplicitAutograd_vec__upsample_bilinear2d_aa(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors);
}
at::Tensor _upsample_bilinear2d_aa_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
    return wrapper_CompositeImplicitAutograd_vec__upsample_bilinear2d_aa(input, output_size, align_corners, scale_factors);
}
at::Tensor upsample_trilinear3d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
    return wrapper_CompositeImplicitAutograd_vec_upsample_trilinear3d(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors);
}
at::Tensor upsample_trilinear3d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
    return wrapper_CompositeImplicitAutograd_vec_upsample_trilinear3d(input, output_size, align_corners, scale_factors);
}
at::Tensor upsample_bicubic2d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
    return wrapper_CompositeImplicitAutograd_vec_upsample_bicubic2d(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors);
}
at::Tensor upsample_bicubic2d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
    return wrapper_CompositeImplicitAutograd_vec_upsample_bicubic2d(input, output_size, align_corners, scale_factors);
}
at::Tensor _upsample_bicubic2d_aa(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
    return wrapper_CompositeImplicitAutograd_vec__upsample_bicubic2d_aa(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors);
}
at::Tensor _upsample_bicubic2d_aa_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
    return wrapper_CompositeImplicitAutograd_vec__upsample_bicubic2d_aa(input, output_size, align_corners, scale_factors);
}
// The nearest / nearest-exact variants have no `align_corners` parameter.
at::Tensor upsample_nearest1d(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
    return wrapper_CompositeImplicitAutograd_vec_upsample_nearest1d(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors);
}
at::Tensor upsample_nearest1d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
    return wrapper_CompositeImplicitAutograd_vec_upsample_nearest1d(input, output_size, scale_factors);
}
at::Tensor _upsample_nearest_exact1d(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
    return wrapper_CompositeImplicitAutograd_vec__upsample_nearest_exact1d(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors);
}
at::Tensor _upsample_nearest_exact1d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
    return wrapper_CompositeImplicitAutograd_vec__upsample_nearest_exact1d(input, output_size, scale_factors);
}
at::Tensor upsample_nearest2d(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
    return wrapper_CompositeImplicitAutograd_vec_upsample_nearest2d(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors);
}
at::Tensor upsample_nearest2d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
    return wrapper_CompositeImplicitAutograd_vec_upsample_nearest2d(input, output_size, scale_factors);
}
at::Tensor _upsample_nearest_exact2d(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
    return wrapper_CompositeImplicitAutograd_vec__upsample_nearest_exact2d(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors);
}
at::Tensor _upsample_nearest_exact2d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
    return wrapper_CompositeImplicitAutograd_vec__upsample_nearest_exact2d(input, output_size, scale_factors);
}
at::Tensor upsample_nearest3d(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
    return wrapper_CompositeImplicitAutograd_vec_upsample_nearest3d(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors);
}
at::Tensor upsample_nearest3d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
    return wrapper_CompositeImplicitAutograd_vec_upsample_nearest3d(input, output_size, scale_factors);
}
at::Tensor _upsample_nearest_exact3d(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
    return wrapper_CompositeImplicitAutograd_vec__upsample_nearest_exact3d(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors);
}
at::Tensor _upsample_nearest_exact3d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
    return wrapper_CompositeImplicitAutograd_vec__upsample_nearest_exact3d(input, output_size, scale_factors);
}
// Forwarding stubs for the slow (reference) convolution ops.
// For each op with an output-tensor variant the generator emits two spellings
// of the same call: *_out (out parameter first) and *_outf (out parameter
// last); both forward to the identical wrapper.
at::Tensor thnn_conv2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
return wrapper_CompositeImplicitAutograd__thnn_conv2d(self, weight, kernel_size, bias, stride, padding);
}
at::Tensor & thnn_conv2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
return wrapper_CompositeImplicitAutograd_out_thnn_conv2d_out(self, weight, kernel_size, bias, stride, padding, out);
}
at::Tensor & thnn_conv2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_thnn_conv2d_out(self, weight, kernel_size, bias, stride, padding, out);
}
// slow_conv3d additionally has SymInt padding: the IntArrayRef overloads
// convert padding via c10::fromIntArrayRefSlow, the *_symint overloads pass
// the symbolic padding straight through.
at::Tensor slow_conv3d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
return wrapper_CompositeImplicitAutograd__slow_conv3d(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding));
}
at::Tensor slow_conv3d_symint(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding) {
return wrapper_CompositeImplicitAutograd__slow_conv3d(self, weight, kernel_size, bias, stride, padding);
}
at::Tensor & slow_conv3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
return wrapper_CompositeImplicitAutograd_out_slow_conv3d_out(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), out);
}
at::Tensor & slow_conv3d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_slow_conv3d_out(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), out);
}
at::Tensor & slow_conv3d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding) {
return wrapper_CompositeImplicitAutograd_out_slow_conv3d_out(self, weight, kernel_size, bias, stride, padding, out);
}
at::Tensor & slow_conv3d_symint_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_slow_conv3d_out(self, weight, kernel_size, bias, stride, padding, out);
}
// Forwarding stubs: column_stack (functional + out/outf), isfinite, and the
// internal batch-dim helpers. All simply dispatch to the file-local
// CompositeImplicitAutograd wrappers.
at::Tensor column_stack(at::TensorList tensors) {
return wrapper_CompositeImplicitAutograd__column_stack(tensors);
}
at::Tensor & column_stack_out(at::Tensor & out, at::TensorList tensors) {
return wrapper_CompositeImplicitAutograd_out_column_stack_out(tensors, out);
}
at::Tensor & column_stack_outf(at::TensorList tensors, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_column_stack_out(tensors, out);
}
at::Tensor isfinite(const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd__isfinite(self);
}
at::Tensor _add_batch_dim(const at::Tensor & self, int64_t batch_dim, int64_t level) {
return wrapper_CompositeImplicitAutograd___add_batch_dim(self, batch_dim, level);
}
at::Tensor _remove_batch_dim(const at::Tensor & self, int64_t level, int64_t batch_size, int64_t out_dim) {
return wrapper_CompositeImplicitAutograd___remove_batch_dim(self, level, batch_size, out_dim);
}
// Forwarding stubs for unary `special_*` ops (expm1, exp2, psi, digamma).
// Pattern per op: functional form, out-first *_out, out-last *_outf; the two
// out variants call the same wrapper.
at::Tensor special_expm1(const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd__special_expm1(self);
}
at::Tensor & special_expm1_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd_out_special_expm1_out(self, out);
}
at::Tensor & special_expm1_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_special_expm1_out(self, out);
}
at::Tensor special_exp2(const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd__special_exp2(self);
}
at::Tensor & special_exp2_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd_out_special_exp2_out(self, out);
}
at::Tensor & special_exp2_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_special_exp2_out(self, out);
}
at::Tensor special_psi(const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd__special_psi(self);
}
at::Tensor & special_psi_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd_out_special_psi_out(self, out);
}
at::Tensor & special_psi_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_special_psi_out(self, out);
}
at::Tensor special_digamma(const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd__special_digamma(self);
}
at::Tensor & special_digamma_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd_out_special_digamma_out(self, out);
}
at::Tensor & special_digamma_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_special_digamma_out(self, out);
}
// Forwarding stubs for unary `special_*` ops (gammaln, erf, erfc, erfinv,
// ndtr), following the same functional / *_out / *_outf pattern.
at::Tensor special_gammaln(const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd__special_gammaln(self);
}
at::Tensor & special_gammaln_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd_out_special_gammaln_out(self, out);
}
at::Tensor & special_gammaln_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_special_gammaln_out(self, out);
}
at::Tensor special_erf(const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd__special_erf(self);
}
at::Tensor & special_erf_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd_out_special_erf_out(self, out);
}
at::Tensor & special_erf_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_special_erf_out(self, out);
}
at::Tensor special_erfc(const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd__special_erfc(self);
}
at::Tensor & special_erfc_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd_out_special_erfc_out(self, out);
}
at::Tensor & special_erfc_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_special_erfc_out(self, out);
}
at::Tensor special_erfinv(const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd__special_erfinv(self);
}
at::Tensor & special_erfinv_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd_out_special_erfinv_out(self, out);
}
at::Tensor & special_erfinv_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_special_erfinv_out(self, out);
}
at::Tensor special_ndtr(const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd__special_ndtr(self);
}
at::Tensor & special_ndtr_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd_out_special_ndtr_out(self, out);
}
at::Tensor & special_ndtr_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_special_ndtr_out(self, out);
}
// Forwarding stubs for the three special_xlogy overloads — Tensor/Tensor,
// Scalar/Tensor ("self_scalar"), and Tensor/Scalar ("other_scalar") — each
// with functional, *_out, and *_outf entry points routed to the matching
// overload-specific wrapper.
at::Tensor special_xlogy(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd__special_xlogy(self, other);
}
at::Tensor & special_xlogy_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_out_special_xlogy_out(self, other, out);
}
at::Tensor & special_xlogy_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_special_xlogy_out(self, other, out);
}
at::Tensor special_xlogy(const at::Scalar & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_self_scalar_special_xlogy(self, other);
}
at::Tensor & special_xlogy_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_self_scalar_out_special_xlogy_out(self, other, out);
}
at::Tensor & special_xlogy_outf(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_self_scalar_out_special_xlogy_out(self, other, out);
}
at::Tensor special_xlogy(const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeImplicitAutograd_other_scalar_special_xlogy(self, other);
}
at::Tensor & special_xlogy_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
return wrapper_CompositeImplicitAutograd_other_scalar_out_special_xlogy_out(self, other, out);
}
at::Tensor & special_xlogy_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_other_scalar_out_special_xlogy_out(self, other, out);
}
// Forwarding stubs for special_i0, special_logit (optional eps),
// special_polygamma (order n comes first, matching the schema), and
// special_logsumexp (dim/keepdim reduction arguments).
at::Tensor special_i0(const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd__special_i0(self);
}
at::Tensor & special_i0_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd_out_special_i0_out(self, out);
}
at::Tensor & special_i0_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_special_i0_out(self, out);
}
at::Tensor special_logit(const at::Tensor & self, c10::optional<double> eps) {
return wrapper_CompositeImplicitAutograd__special_logit(self, eps);
}
at::Tensor & special_logit_out(at::Tensor & out, const at::Tensor & self, c10::optional<double> eps) {
return wrapper_CompositeImplicitAutograd_out_special_logit_out(self, eps, out);
}
at::Tensor & special_logit_outf(const at::Tensor & self, c10::optional<double> eps, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_special_logit_out(self, eps, out);
}
at::Tensor special_polygamma(int64_t n, const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd__special_polygamma(n, self);
}
at::Tensor & special_polygamma_out(at::Tensor & out, int64_t n, const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd_out_special_polygamma_out(n, self, out);
}
at::Tensor & special_polygamma_outf(int64_t n, const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_special_polygamma_out(n, self, out);
}
at::Tensor special_logsumexp(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
return wrapper_CompositeImplicitAutograd__special_logsumexp(self, dim, keepdim);
}
at::Tensor & special_logsumexp_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
return wrapper_CompositeImplicitAutograd_out_special_logsumexp_out(self, dim, keepdim, out);
}
at::Tensor & special_logsumexp_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_special_logsumexp_out(self, dim, keepdim, out);
}
// Forwarding stubs for special_expit, special_sinc, special_round (decimals),
// special_log1p, and special_log_softmax (dim + optional dtype; functional
// form only — the generator emitted no out variant for it here).
at::Tensor special_expit(const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd__special_expit(self);
}
at::Tensor & special_expit_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd_out_special_expit_out(self, out);
}
at::Tensor & special_expit_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_special_expit_out(self, out);
}
at::Tensor special_sinc(const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd__special_sinc(self);
}
at::Tensor & special_sinc_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd_out_special_sinc_out(self, out);
}
at::Tensor & special_sinc_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_special_sinc_out(self, out);
}
at::Tensor special_round(const at::Tensor & self, int64_t decimals) {
return wrapper_CompositeImplicitAutograd__special_round(self, decimals);
}
at::Tensor & special_round_out(at::Tensor & out, const at::Tensor & self, int64_t decimals) {
return wrapper_CompositeImplicitAutograd_out_special_round_out(self, decimals, out);
}
at::Tensor & special_round_outf(const at::Tensor & self, int64_t decimals, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_special_round_out(self, decimals, out);
}
at::Tensor special_log1p(const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd__special_log1p(self);
}
at::Tensor & special_log1p_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd_out_special_log1p_out(self, out);
}
at::Tensor & special_log1p_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_special_log1p_out(self, out);
}
at::Tensor special_log_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeImplicitAutograd__special_log_softmax(self, dim, dtype);
}
// Forwarding stubs for binary special_gammainc / special_gammaincc,
// special_multigammaln (integer order p), and special_softmax (dim + optional
// dtype; functional form only).
at::Tensor special_gammainc(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd__special_gammainc(self, other);
}
at::Tensor & special_gammainc_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_out_special_gammainc_out(self, other, out);
}
at::Tensor & special_gammainc_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_special_gammainc_out(self, other, out);
}
at::Tensor special_gammaincc(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd__special_gammaincc(self, other);
}
at::Tensor & special_gammaincc_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_out_special_gammaincc_out(self, other, out);
}
at::Tensor & special_gammaincc_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_special_gammaincc_out(self, other, out);
}
at::Tensor special_multigammaln(const at::Tensor & self, int64_t p) {
return wrapper_CompositeImplicitAutograd__special_multigammaln(self, p);
}
at::Tensor & special_multigammaln_out(at::Tensor & out, const at::Tensor & self, int64_t p) {
return wrapper_CompositeImplicitAutograd_out_special_multigammaln_out(self, p, out);
}
at::Tensor & special_multigammaln_outf(const at::Tensor & self, int64_t p, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_special_multigammaln_out(self, p, out);
}
at::Tensor special_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeImplicitAutograd__special_softmax(self, dim, dtype);
}
// Forwarding stubs for the one-dimensional FFT family
// (fft/ifft/rfft/irfft/hfft/ihfft). Signature pattern: optional transform
// length n, dimension dim, optional normalization-mode string norm; each op
// has functional, *_out (out-first) and *_outf (out-last) entry points.
at::Tensor fft_fft(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd__fft_fft(self, n, dim, norm);
}
at::Tensor & fft_fft_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd_out_fft_fft_out(self, n, dim, norm, out);
}
at::Tensor & fft_fft_outf(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_fft_fft_out(self, n, dim, norm, out);
}
at::Tensor fft_ifft(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd__fft_ifft(self, n, dim, norm);
}
at::Tensor & fft_ifft_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd_out_fft_ifft_out(self, n, dim, norm, out);
}
at::Tensor & fft_ifft_outf(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_fft_ifft_out(self, n, dim, norm, out);
}
at::Tensor fft_rfft(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd__fft_rfft(self, n, dim, norm);
}
at::Tensor & fft_rfft_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd_out_fft_rfft_out(self, n, dim, norm, out);
}
at::Tensor & fft_rfft_outf(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_fft_rfft_out(self, n, dim, norm, out);
}
at::Tensor fft_irfft(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd__fft_irfft(self, n, dim, norm);
}
at::Tensor & fft_irfft_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd_out_fft_irfft_out(self, n, dim, norm, out);
}
at::Tensor & fft_irfft_outf(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_fft_irfft_out(self, n, dim, norm, out);
}
at::Tensor fft_hfft(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd__fft_hfft(self, n, dim, norm);
}
at::Tensor & fft_hfft_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd_out_fft_hfft_out(self, n, dim, norm, out);
}
at::Tensor & fft_hfft_outf(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_fft_hfft_out(self, n, dim, norm, out);
}
at::Tensor fft_ihfft(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd__fft_ihfft(self, n, dim, norm);
}
at::Tensor & fft_ihfft_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd_out_fft_ihfft_out(self, n, dim, norm, out);
}
at::Tensor & fft_ihfft_outf(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_fft_ihfft_out(self, n, dim, norm, out);
}
// Forwarding stubs for the two-dimensional FFT family. Sizes `s` are an
// optional array, `dim` is a fixed array of dimensions. Note: the hfft2 and
// ihfft2 out variants take and return `const at::Tensor &` for the out
// argument (per their generated schema), unlike the other ops' mutable refs.
at::Tensor fft_fft2(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd__fft_fft2(self, s, dim, norm);
}
at::Tensor & fft_fft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd_out_fft_fft2_out(self, s, dim, norm, out);
}
at::Tensor & fft_fft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_fft_fft2_out(self, s, dim, norm, out);
}
at::Tensor fft_ifft2(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd__fft_ifft2(self, s, dim, norm);
}
at::Tensor & fft_ifft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd_out_fft_ifft2_out(self, s, dim, norm, out);
}
at::Tensor & fft_ifft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_fft_ifft2_out(self, s, dim, norm, out);
}
at::Tensor fft_rfft2(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd__fft_rfft2(self, s, dim, norm);
}
at::Tensor & fft_rfft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd_out_fft_rfft2_out(self, s, dim, norm, out);
}
at::Tensor & fft_rfft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_fft_rfft2_out(self, s, dim, norm, out);
}
at::Tensor fft_irfft2(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd__fft_irfft2(self, s, dim, norm);
}
at::Tensor & fft_irfft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd_out_fft_irfft2_out(self, s, dim, norm, out);
}
at::Tensor & fft_irfft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_fft_irfft2_out(self, s, dim, norm, out);
}
at::Tensor fft_hfft2(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd__fft_hfft2(self, s, dim, norm);
}
const at::Tensor & fft_hfft2_out(const at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd_out_fft_hfft2_out(self, s, dim, norm, out);
}
const at::Tensor & fft_hfft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_fft_hfft2_out(self, s, dim, norm, out);
}
at::Tensor fft_ihfft2(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd__fft_ihfft2(self, s, dim, norm);
}
const at::Tensor & fft_ihfft2_out(const at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd_out_fft_ihfft2_out(self, s, dim, norm, out);
}
const at::Tensor & fft_ihfft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_fft_ihfft2_out(self, s, dim, norm, out);
}
// Forwarding stubs for the N-dimensional FFT family. Unlike the 2-D variants,
// both `s` and `dim` are optional arrays here. As with hfft2/ihfft2, the
// hfftn/ihfftn out variants use `const at::Tensor &` for the out argument.
at::Tensor fft_fftn(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd__fft_fftn(self, s, dim, norm);
}
at::Tensor & fft_fftn_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd_out_fft_fftn_out(self, s, dim, norm, out);
}
at::Tensor & fft_fftn_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_fft_fftn_out(self, s, dim, norm, out);
}
at::Tensor fft_ifftn(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd__fft_ifftn(self, s, dim, norm);
}
at::Tensor & fft_ifftn_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd_out_fft_ifftn_out(self, s, dim, norm, out);
}
at::Tensor & fft_ifftn_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_fft_ifftn_out(self, s, dim, norm, out);
}
at::Tensor fft_rfftn(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd__fft_rfftn(self, s, dim, norm);
}
at::Tensor & fft_rfftn_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd_out_fft_rfftn_out(self, s, dim, norm, out);
}
at::Tensor & fft_rfftn_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_fft_rfftn_out(self, s, dim, norm, out);
}
at::Tensor fft_irfftn(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd__fft_irfftn(self, s, dim, norm);
}
at::Tensor & fft_irfftn_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd_out_fft_irfftn_out(self, s, dim, norm, out);
}
at::Tensor & fft_irfftn_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_fft_irfftn_out(self, s, dim, norm, out);
}
at::Tensor fft_hfftn(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd__fft_hfftn(self, s, dim, norm);
}
const at::Tensor & fft_hfftn_out(const at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd_out_fft_hfftn_out(self, s, dim, norm, out);
}
const at::Tensor & fft_hfftn_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_fft_hfftn_out(self, s, dim, norm, out);
}
at::Tensor fft_ihfftn(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd__fft_ihfftn(self, s, dim, norm);
}
const at::Tensor & fft_ihfftn_out(const at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
return wrapper_CompositeImplicitAutograd_out_fft_ihfftn_out(self, s, dim, norm, out);
}
const at::Tensor & fft_ihfftn_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_fft_ihfftn_out(self, s, dim, norm, out);
}
// at::fft_fftshift / at::fft_ifftshift functional entry points (no out=
// variants visible for these ops). Forward to the codegen'd
// CompositeImplicitAutograd wrappers. @generated code — comments only.
at::Tensor fft_fftshift(const at::Tensor & self, at::OptionalIntArrayRef dim) {
return wrapper_CompositeImplicitAutograd__fft_fftshift(self, dim);
}
at::Tensor fft_ifftshift(const at::Tensor & self, at::OptionalIntArrayRef dim) {
return wrapper_CompositeImplicitAutograd__fft_ifftshift(self, dim);
}
// at::linalg_cholesky entry points (functional / out / outf), forwarding to the
// codegen'd CompositeImplicitAutograd wrappers. @generated code — comments only.
at::Tensor linalg_cholesky(const at::Tensor & self, bool upper) {
return wrapper_CompositeImplicitAutograd__linalg_cholesky(self, upper);
}
at::Tensor & linalg_cholesky_out(at::Tensor & out, const at::Tensor & self, bool upper) {
return wrapper_CompositeImplicitAutograd_out_linalg_cholesky_out(self, upper, out);
}
at::Tensor & linalg_cholesky_outf(const at::Tensor & self, bool upper, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_linalg_cholesky_out(self, upper, out);
}
// at::linalg_lu_factor entry points. The out variants carry two out tensors
// (LU, pivots); the outf form places them after the inputs. Forwards to the
// codegen'd CompositeImplicitAutograd wrappers. @generated code.
::std::tuple<at::Tensor,at::Tensor> linalg_lu_factor(const at::Tensor & A, bool pivot) {
return wrapper_CompositeImplicitAutograd__linalg_lu_factor(A, pivot);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_lu_factor_out(at::Tensor & LU, at::Tensor & pivots, const at::Tensor & A, bool pivot) {
return wrapper_CompositeImplicitAutograd_out_linalg_lu_factor_out(A, pivot, LU, pivots);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_lu_factor_outf(const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots) {
return wrapper_CompositeImplicitAutograd_out_linalg_lu_factor_out(A, pivot, LU, pivots);
}
// at::linalg_det entry points plus the legacy at::det alias op; all forward to
// the codegen'd CompositeImplicitAutograd wrappers. @generated code.
at::Tensor linalg_det(const at::Tensor & A) {
return wrapper_CompositeImplicitAutograd__linalg_det(A);
}
at::Tensor & linalg_det_out(at::Tensor & out, const at::Tensor & A) {
return wrapper_CompositeImplicitAutograd_out_linalg_det_out(A, out);
}
at::Tensor & linalg_det_outf(const at::Tensor & A, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_linalg_det_out(A, out);
}
// at::det — separate op from linalg_det; routed through its own wrapper.
at::Tensor det(const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd__det(self);
}
// at::linalg_ldl_factor entry points (two out tensors: LD, pivots), forwarding
// to the codegen'd CompositeImplicitAutograd wrappers. @generated code.
::std::tuple<at::Tensor,at::Tensor> linalg_ldl_factor(const at::Tensor & self, bool hermitian) {
return wrapper_CompositeImplicitAutograd__linalg_ldl_factor(self, hermitian);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_ldl_factor_out(at::Tensor & LD, at::Tensor & pivots, const at::Tensor & self, bool hermitian) {
return wrapper_CompositeImplicitAutograd_out_linalg_ldl_factor_out(self, hermitian, LD, pivots);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_ldl_factor_outf(const at::Tensor & self, bool hermitian, at::Tensor & LD, at::Tensor & pivots) {
return wrapper_CompositeImplicitAutograd_out_linalg_ldl_factor_out(self, hermitian, LD, pivots);
}
// at::linalg_matmul entry points (functional / out / outf), forwarding to the
// codegen'd CompositeImplicitAutograd wrappers. @generated code — comments only.
at::Tensor linalg_matmul(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd__linalg_matmul(self, other);
}
at::Tensor & linalg_matmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_out_linalg_matmul_out(self, other, out);
}
at::Tensor & linalg_matmul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_linalg_matmul_out(self, other, out);
}
// at::linalg_vecdot entry points (functional / out / outf), forwarding to the
// codegen'd CompositeImplicitAutograd wrappers. @generated code — comments only.
at::Tensor linalg_vecdot(const at::Tensor & x, const at::Tensor & y, int64_t dim) {
return wrapper_CompositeImplicitAutograd__linalg_vecdot(x, y, dim);
}
at::Tensor & linalg_vecdot_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & y, int64_t dim) {
return wrapper_CompositeImplicitAutograd_out_linalg_vecdot_out(x, y, dim, out);
}
at::Tensor & linalg_vecdot_outf(const at::Tensor & x, const at::Tensor & y, int64_t dim, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_linalg_vecdot_out(x, y, dim, out);
}
// Sign-and-log-determinant family: at::linalg_slogdet, the legacy at::slogdet
// alias, and at::logdet. Each forwards to its own codegen'd
// CompositeImplicitAutograd wrapper. The slogdet out variants carry two out
// tensors (sign, logabsdet). @generated code — comments only.
::std::tuple<at::Tensor,at::Tensor> linalg_slogdet(const at::Tensor & A) {
return wrapper_CompositeImplicitAutograd__linalg_slogdet(A);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_slogdet_out(at::Tensor & sign, at::Tensor & logabsdet, const at::Tensor & A) {
return wrapper_CompositeImplicitAutograd_out_linalg_slogdet_out(A, sign, logabsdet);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_slogdet_outf(const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet) {
return wrapper_CompositeImplicitAutograd_out_linalg_slogdet_out(A, sign, logabsdet);
}
::std::tuple<at::Tensor,at::Tensor> slogdet(const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd__slogdet(self);
}
::std::tuple<at::Tensor &,at::Tensor &> slogdet_out(at::Tensor & sign, at::Tensor & logabsdet, const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd_out_slogdet_out(self, sign, logabsdet);
}
::std::tuple<at::Tensor &,at::Tensor &> slogdet_outf(const at::Tensor & self, at::Tensor & sign, at::Tensor & logabsdet) {
return wrapper_CompositeImplicitAutograd_out_slogdet_out(self, sign, logabsdet);
}
// at::logdet has only a functional form here.
at::Tensor logdet(const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd__logdet(self);
}
// at::linalg_eigvals entry points (functional / out / outf), forwarding to the
// codegen'd CompositeImplicitAutograd wrappers. @generated code — comments only.
at::Tensor linalg_eigvals(const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd__linalg_eigvals(self);
}
at::Tensor & linalg_eigvals_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd_out_linalg_eigvals_out(self, out);
}
at::Tensor & linalg_eigvals_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_linalg_eigvals_out(self, out);
}
// at::linalg_eigh entry points. The out overload name in the wrapper is
// "eigvals" (the schema's overload name), not "out"; it carries two out
// tensors (eigvals, eigvecs). @generated code — comments only.
::std::tuple<at::Tensor,at::Tensor> linalg_eigh(const at::Tensor & self, c10::string_view UPLO) {
return wrapper_CompositeImplicitAutograd__linalg_eigh(self, UPLO);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_eigh_out(at::Tensor & eigvals, at::Tensor & eigvecs, const at::Tensor & self, c10::string_view UPLO) {
return wrapper_CompositeImplicitAutograd_eigvals_linalg_eigh_out(self, UPLO, eigvals, eigvecs);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_eigh_outf(const at::Tensor & self, c10::string_view UPLO, at::Tensor & eigvals, at::Tensor & eigvecs) {
return wrapper_CompositeImplicitAutograd_eigvals_linalg_eigh_out(self, UPLO, eigvals, eigvecs);
}
// at::linalg_eigvalsh entry points (functional / out / outf), forwarding to the
// codegen'd CompositeImplicitAutograd wrappers. @generated code — comments only.
at::Tensor linalg_eigvalsh(const at::Tensor & self, c10::string_view UPLO) {
return wrapper_CompositeImplicitAutograd__linalg_eigvalsh(self, UPLO);
}
at::Tensor & linalg_eigvalsh_out(at::Tensor & out, const at::Tensor & self, c10::string_view UPLO) {
return wrapper_CompositeImplicitAutograd_out_linalg_eigvalsh_out(self, UPLO, out);
}
at::Tensor & linalg_eigvalsh_outf(const at::Tensor & self, c10::string_view UPLO, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_linalg_eigvalsh_out(self, UPLO, out);
}
// at::linalg_inv entry points (functional / out / outf), forwarding to the
// codegen'd CompositeImplicitAutograd wrappers. @generated code — comments only.
at::Tensor linalg_inv(const at::Tensor & A) {
return wrapper_CompositeImplicitAutograd__linalg_inv(A);
}
at::Tensor & linalg_inv_out(at::Tensor & out, const at::Tensor & A) {
return wrapper_CompositeImplicitAutograd_out_linalg_inv_out(A, out);
}
at::Tensor & linalg_inv_outf(const at::Tensor & A, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_linalg_inv_out(A, out);
}
// Legacy at::inverse entry points (separate op from linalg_inv), forwarding to
// the codegen'd CompositeImplicitAutograd wrappers. @generated code.
at::Tensor inverse(const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd__inverse(self);
}
at::Tensor & inverse_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_CompositeImplicitAutograd_out_inverse_out(self, out);
}
at::Tensor & inverse_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_inverse_out(self, out);
}
// at::inner entry points (functional / out / outf), forwarding to the
// codegen'd CompositeImplicitAutograd wrappers. @generated code — comments only.
at::Tensor inner(const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd__inner(self, other);
}
at::Tensor & inner_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_CompositeImplicitAutograd_out_inner_out(self, other, out);
}
at::Tensor & inner_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_inner_out(self, other, out);
}
// at::outer entry points (functional / out / outf), forwarding to the
// codegen'd CompositeImplicitAutograd wrappers. @generated code — comments only.
at::Tensor outer(const at::Tensor & self, const at::Tensor & vec2) {
return wrapper_CompositeImplicitAutograd__outer(self, vec2);
}
at::Tensor & outer_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & vec2) {
return wrapper_CompositeImplicitAutograd_out_outer_out(self, vec2, out);
}
at::Tensor & outer_outf(const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_outer_out(self, vec2, out);
}
// Legacy at::ger entry points (separate op from at::outer, same signature
// shape), forwarding to the codegen'd CompositeImplicitAutograd wrappers.
// @generated code — comments only.
at::Tensor ger(const at::Tensor & self, const at::Tensor & vec2) {
return wrapper_CompositeImplicitAutograd__ger(self, vec2);
}
at::Tensor & ger_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & vec2) {
return wrapper_CompositeImplicitAutograd_out_ger_out(self, vec2, out);
}
at::Tensor & ger_outf(const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_ger_out(self, vec2, out);
}
// at::linalg_norm entry points: two C++ overload families, one taking an
// optional Scalar `ord` (default schema overload) and one taking a string
// `ord` (the "ord_str" schema overload — note the distinct wrapper names).
// Each has functional / out / outf forms. @generated code — comments only.
at::Tensor linalg_norm(const at::Tensor & self, const c10::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeImplicitAutograd__linalg_norm(self, ord, dim, keepdim, dtype);
}
at::Tensor & linalg_norm_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeImplicitAutograd_out_linalg_norm_out(self, ord, dim, keepdim, dtype, out);
}
at::Tensor & linalg_norm_outf(const at::Tensor & self, const c10::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_linalg_norm_out(self, ord, dim, keepdim, dtype, out);
}
// String-ord overload family (schema overload "ord_str").
at::Tensor linalg_norm(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeImplicitAutograd_ord_str_linalg_norm(self, ord, dim, keepdim, dtype);
}
at::Tensor & linalg_norm_out(at::Tensor & out, const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeImplicitAutograd_ord_str_out_linalg_norm_out(self, ord, dim, keepdim, dtype, out);
}
at::Tensor & linalg_norm_outf(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_ord_str_out_linalg_norm_out(self, ord, dim, keepdim, dtype, out);
}
// at::linalg_matrix_norm entry points: Scalar-ord overloads (default schema)
// and string-ord overloads (schema overload "str_ord"), each with
// functional / out / outf forms. @generated code — comments only.
at::Tensor linalg_matrix_norm(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeImplicitAutograd__linalg_matrix_norm(self, ord, dim, keepdim, dtype);
}
at::Tensor & linalg_matrix_norm_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeImplicitAutograd_out_linalg_matrix_norm_out(self, ord, dim, keepdim, dtype, out);
}
at::Tensor & linalg_matrix_norm_outf(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_linalg_matrix_norm_out(self, ord, dim, keepdim, dtype, out);
}
// String-ord overload family (schema overload "str_ord").
at::Tensor linalg_matrix_norm(const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeImplicitAutograd_str_ord_linalg_matrix_norm(self, ord, dim, keepdim, dtype);
}
at::Tensor & linalg_matrix_norm_out(at::Tensor & out, const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
return wrapper_CompositeImplicitAutograd_str_ord_out_linalg_matrix_norm_out(self, ord, dim, keepdim, dtype, out);
}
at::Tensor & linalg_matrix_norm_outf(const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_str_ord_out_linalg_matrix_norm_out(self, ord, dim, keepdim, dtype, out);
}
// at::linalg_svd entry points (three out tensors: U, S, Vh; the wrapper's
// overload name is "U"). Forwards to the codegen'd CompositeImplicitAutograd
// wrappers. @generated code — comments only.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_svd(const at::Tensor & A, bool full_matrices, c10::optional<c10::string_view> driver) {
return wrapper_CompositeImplicitAutograd__linalg_svd(A, full_matrices, driver);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_svd_out(at::Tensor & U, at::Tensor & S, at::Tensor & Vh, const at::Tensor & A, bool full_matrices, c10::optional<c10::string_view> driver) {
return wrapper_CompositeImplicitAutograd_U_linalg_svd_out(A, full_matrices, driver, U, S, Vh);
}
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_svd_outf(const at::Tensor & A, bool full_matrices, c10::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh) {
return wrapper_CompositeImplicitAutograd_U_linalg_svd_out(A, full_matrices, driver, U, S, Vh);
}
// at::linalg_svdvals entry points (functional / out / outf), forwarding to the
// codegen'd CompositeImplicitAutograd wrappers. @generated code — comments only.
at::Tensor linalg_svdvals(const at::Tensor & A, c10::optional<c10::string_view> driver) {
return wrapper_CompositeImplicitAutograd__linalg_svdvals(A, driver);
}
at::Tensor & linalg_svdvals_out(at::Tensor & out, const at::Tensor & A, c10::optional<c10::string_view> driver) {
return wrapper_CompositeImplicitAutograd_out_linalg_svdvals_out(A, driver, out);
}
at::Tensor & linalg_svdvals_outf(const at::Tensor & A, c10::optional<c10::string_view> driver, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_linalg_svdvals_out(A, driver, out);
}
// at::linalg_cond entry points: optional-Scalar `p` overloads (default schema)
// and string `p` overloads (schema overload "p_str"), each with
// functional / out / outf forms. @generated code — comments only.
at::Tensor linalg_cond(const at::Tensor & self, const c10::optional<at::Scalar> & p) {
return wrapper_CompositeImplicitAutograd__linalg_cond(self, p);
}
at::Tensor & linalg_cond_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p) {
return wrapper_CompositeImplicitAutograd_out_linalg_cond_out(self, p, out);
}
at::Tensor & linalg_cond_outf(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_linalg_cond_out(self, p, out);
}
// String-p overload family (schema overload "p_str").
at::Tensor linalg_cond(const at::Tensor & self, c10::string_view p) {
return wrapper_CompositeImplicitAutograd_p_str_linalg_cond(self, p);
}
at::Tensor & linalg_cond_out(at::Tensor & out, const at::Tensor & self, c10::string_view p) {
return wrapper_CompositeImplicitAutograd_p_str_out_linalg_cond_out(self, p, out);
}
at::Tensor & linalg_cond_outf(const at::Tensor & self, c10::string_view p, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_p_str_out_linalg_cond_out(self, p, out);
}
// at::linalg_pinv entry points: three overload families distinguished by how
// the tolerance is supplied — float atol/rtol pair ("atol_rtol_float"),
// double rcond (default schema), and Tensor rcond ("rcond_tensor") — each with
// functional / out / outf forms. @generated code — comments only.
at::Tensor linalg_pinv(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian) {
return wrapper_CompositeImplicitAutograd_atol_rtol_float_linalg_pinv(self, atol, rtol, hermitian);
}
at::Tensor & linalg_pinv_out(at::Tensor & out, const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian) {
return wrapper_CompositeImplicitAutograd_atol_rtol_float_out_linalg_pinv_out(self, atol, rtol, hermitian, out);
}
at::Tensor & linalg_pinv_outf(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_atol_rtol_float_out_linalg_pinv_out(self, atol, rtol, hermitian, out);
}
// double-rcond overload family.
at::Tensor linalg_pinv(const at::Tensor & self, double rcond, bool hermitian) {
return wrapper_CompositeImplicitAutograd__linalg_pinv(self, rcond, hermitian);
}
at::Tensor & linalg_pinv_out(at::Tensor & out, const at::Tensor & self, double rcond, bool hermitian) {
return wrapper_CompositeImplicitAutograd_out_linalg_pinv_out(self, rcond, hermitian, out);
}
at::Tensor & linalg_pinv_outf(const at::Tensor & self, double rcond, bool hermitian, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_linalg_pinv_out(self, rcond, hermitian, out);
}
// Tensor-rcond overload family (schema overload "rcond_tensor").
at::Tensor linalg_pinv(const at::Tensor & self, const at::Tensor & rcond, bool hermitian) {
return wrapper_CompositeImplicitAutograd_rcond_tensor_linalg_pinv(self, rcond, hermitian);
}
at::Tensor & linalg_pinv_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & rcond, bool hermitian) {
return wrapper_CompositeImplicitAutograd_out_rcond_tensor_linalg_pinv_out(self, rcond, hermitian, out);
}
at::Tensor & linalg_pinv_outf(const at::Tensor & self, const at::Tensor & rcond, bool hermitian, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_rcond_tensor_linalg_pinv_out(self, rcond, hermitian, out);
}
// at::linalg_solve_ex entry points (two out tensors: result, info), forwarding
// to the codegen'd CompositeImplicitAutograd wrappers. @generated code.
::std::tuple<at::Tensor,at::Tensor> linalg_solve_ex(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
return wrapper_CompositeImplicitAutograd__linalg_solve_ex(A, B, left, check_errors);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_solve_ex_out(at::Tensor & result, at::Tensor & info, const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
return wrapper_CompositeImplicitAutograd_out_linalg_solve_ex_out(A, B, left, check_errors, result, info);
}
::std::tuple<at::Tensor &,at::Tensor &> linalg_solve_ex_outf(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & info) {
return wrapper_CompositeImplicitAutograd_out_linalg_solve_ex_out(A, B, left, check_errors, result, info);
}
// at::linalg_solve entry points (functional / out / outf), forwarding to the
// codegen'd CompositeImplicitAutograd wrappers. @generated code — comments only.
at::Tensor linalg_solve(const at::Tensor & A, const at::Tensor & B, bool left) {
return wrapper_CompositeImplicitAutograd__linalg_solve(A, B, left);
}
at::Tensor & linalg_solve_out(at::Tensor & out, const at::Tensor & A, const at::Tensor & B, bool left) {
return wrapper_CompositeImplicitAutograd_out_linalg_solve_out(A, B, left, out);
}
at::Tensor & linalg_solve_outf(const at::Tensor & A, const at::Tensor & B, bool left, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_linalg_solve_out(A, B, left, out);
}
// at::linalg_tensorinv entry points (functional / out / outf), forwarding to
// the codegen'd CompositeImplicitAutograd wrappers. @generated code.
at::Tensor linalg_tensorinv(const at::Tensor & self, int64_t ind) {
return wrapper_CompositeImplicitAutograd__linalg_tensorinv(self, ind);
}
at::Tensor & linalg_tensorinv_out(at::Tensor & out, const at::Tensor & self, int64_t ind) {
return wrapper_CompositeImplicitAutograd_out_linalg_tensorinv_out(self, ind, out);
}
at::Tensor & linalg_tensorinv_outf(const at::Tensor & self, int64_t ind, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_linalg_tensorinv_out(self, ind, out);
}
// at::linalg_tensorsolve entry points (functional / out / outf), forwarding to
// the codegen'd CompositeImplicitAutograd wrappers. @generated code.
at::Tensor linalg_tensorsolve(const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims) {
return wrapper_CompositeImplicitAutograd__linalg_tensorsolve(self, other, dims);
}
at::Tensor & linalg_tensorsolve_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims) {
return wrapper_CompositeImplicitAutograd_out_linalg_tensorsolve_out(self, other, dims, out);
}
at::Tensor & linalg_tensorsolve_outf(const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_linalg_tensorsolve_out(self, other, dims, out);
}
// at::linalg_matrix_power entry points (functional / out / outf), forwarding
// to the codegen'd CompositeImplicitAutograd wrappers. @generated code.
at::Tensor linalg_matrix_power(const at::Tensor & self, int64_t n) {
return wrapper_CompositeImplicitAutograd__linalg_matrix_power(self, n);
}
at::Tensor & linalg_matrix_power_out(at::Tensor & out, const at::Tensor & self, int64_t n) {
return wrapper_CompositeImplicitAutograd_out_linalg_matrix_power_out(self, n, out);
}
at::Tensor & linalg_matrix_power_outf(const at::Tensor & self, int64_t n, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_linalg_matrix_power_out(self, n, out);
}
// at::linalg_matrix_rank entry points: four overload families distinguished by
// how the tolerance is supplied — Tensor atol/rtol ("atol_rtol_tensor"),
// float atol/rtol ("atol_rtol_float"), double tol (default schema), and
// Tensor tol ("tol_tensor") — each with functional / out / outf forms.
// All forward to the codegen'd CompositeImplicitAutograd wrappers.
// @generated code — comments only.
at::Tensor linalg_matrix_rank(const at::Tensor & input, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian) {
return wrapper_CompositeImplicitAutograd_atol_rtol_tensor_linalg_matrix_rank(input, atol, rtol, hermitian);
}
at::Tensor & linalg_matrix_rank_out(at::Tensor & out, const at::Tensor & input, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian) {
return wrapper_CompositeImplicitAutograd_atol_rtol_tensor_out_linalg_matrix_rank_out(input, atol, rtol, hermitian, out);
}
at::Tensor & linalg_matrix_rank_outf(const at::Tensor & input, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_atol_rtol_tensor_out_linalg_matrix_rank_out(input, atol, rtol, hermitian, out);
}
// float atol/rtol overload family (schema overload "atol_rtol_float").
at::Tensor linalg_matrix_rank(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian) {
return wrapper_CompositeImplicitAutograd_atol_rtol_float_linalg_matrix_rank(self, atol, rtol, hermitian);
}
at::Tensor & linalg_matrix_rank_out(at::Tensor & out, const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian) {
return wrapper_CompositeImplicitAutograd_atol_rtol_float_out_linalg_matrix_rank_out(self, atol, rtol, hermitian, out);
}
at::Tensor & linalg_matrix_rank_outf(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_atol_rtol_float_out_linalg_matrix_rank_out(self, atol, rtol, hermitian, out);
}
// double-tol overload family (default schema overload).
at::Tensor linalg_matrix_rank(const at::Tensor & self, double tol, bool hermitian) {
return wrapper_CompositeImplicitAutograd__linalg_matrix_rank(self, tol, hermitian);
}
at::Tensor & linalg_matrix_rank_out(at::Tensor & out, const at::Tensor & self, double tol, bool hermitian) {
return wrapper_CompositeImplicitAutograd_out_linalg_matrix_rank_out(self, tol, hermitian, out);
}
at::Tensor & linalg_matrix_rank_outf(const at::Tensor & self, double tol, bool hermitian, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_linalg_matrix_rank_out(self, tol, hermitian, out);
}
// Tensor-tol overload family (schema overload "tol_tensor").
at::Tensor linalg_matrix_rank(const at::Tensor & input, const at::Tensor & tol, bool hermitian) {
return wrapper_CompositeImplicitAutograd_tol_tensor_linalg_matrix_rank(input, tol, hermitian);
}
at::Tensor & linalg_matrix_rank_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & tol, bool hermitian) {
return wrapper_CompositeImplicitAutograd_out_tol_tensor_linalg_matrix_rank_out(input, tol, hermitian, out);
}
at::Tensor & linalg_matrix_rank_outf(const at::Tensor & input, const at::Tensor & tol, bool hermitian, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_tol_tensor_linalg_matrix_rank_out(input, tol, hermitian, out);
}
// at::linalg_multi_dot entry points (functional / out / outf), forwarding to
// the codegen'd CompositeImplicitAutograd wrappers. @generated code.
at::Tensor linalg_multi_dot(at::TensorList tensors) {
return wrapper_CompositeImplicitAutograd__linalg_multi_dot(tensors);
}
at::Tensor & linalg_multi_dot_out(at::Tensor & out, at::TensorList tensors) {
return wrapper_CompositeImplicitAutograd_out_linalg_multi_dot_out(tensors, out);
}
at::Tensor & linalg_multi_dot_outf(at::TensorList tensors, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_out_linalg_multi_dot_out(tensors, out);
}
// at::nested_to_padded_tensor functional entry point, forwarding to the
// codegen'd CompositeImplicitAutograd wrapper. @generated code.
at::Tensor nested_to_padded_tensor(const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size) {
return wrapper_CompositeImplicitAutograd__nested_to_padded_tensor(self, padding, output_size);
}
// Internal `_test_*` operators used to exercise the dispatcher/codegen
// machinery itself (serialization, string defaults, ambiguous default
// resolution, multi-dispatch). Each forwards to its codegen'd
// CompositeImplicitAutograd wrapper. @generated code — comments only.
at::Tensor _test_serialization_subcmul(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
return wrapper_CompositeImplicitAutograd___test_serialization_subcmul(self, other, alpha);
}
at::Tensor _test_string_default(const at::Tensor & dummy, c10::string_view a, c10::string_view b) {
return wrapper_CompositeImplicitAutograd___test_string_default(dummy, a, b);
}
// Two C++ overloads mapping to schema overloads "a" and "b" respectively.
at::Tensor _test_ambiguous_defaults(const at::Tensor & dummy, int64_t a, int64_t b) {
return wrapper_CompositeImplicitAutograd_a__test_ambiguous_defaults(dummy, a, b);
}
at::Tensor _test_ambiguous_defaults(const at::Tensor & dummy, int64_t a, c10::string_view b) {
return wrapper_CompositeImplicitAutograd_b__test_ambiguous_defaults(dummy, a, b);
}
at::Tensor _test_autograd_multiple_dispatch(const at::Tensor & self, bool b) {
return wrapper_CompositeImplicitAutograd_ntonly__test_autograd_multiple_dispatch(self, b);
}
// at::pad_sequence, at::flatten_dense_tensors, and
// at::unflatten_dense_tensors functional entry points, each forwarding to its
// codegen'd CompositeImplicitAutograd wrapper. @generated code.
at::Tensor pad_sequence(at::TensorList sequences, bool batch_first, double padding_value) {
return wrapper_CompositeImplicitAutograd__pad_sequence(sequences, batch_first, padding_value);
}
at::Tensor flatten_dense_tensors(at::TensorList tensors) {
return wrapper_CompositeImplicitAutograd__flatten_dense_tensors(tensors);
}
::std::vector<at::Tensor> unflatten_dense_tensors(const at::Tensor & flat, at::TensorList tensors) {
return wrapper_CompositeImplicitAutograd__unflatten_dense_tensors(flat, tensors);
}
// Scaled dot-product attention entry points: the public
// at::scaled_dot_product_attention plus the internal _scaled_dot_product_attention
// and _scaled_dot_product_attention_math variants. Each forwards to its
// codegen'd CompositeImplicitAutograd wrapper. @generated code — comments only.
at::Tensor scaled_dot_product_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal) {
return wrapper_CompositeImplicitAutograd__scaled_dot_product_attention(query, key, value, attn_mask, dropout_p, is_causal);
}
::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool need_attn_weights, bool is_causal) {
return wrapper_CompositeImplicitAutograd___scaled_dot_product_attention(query, key, value, attn_mask, dropout_p, need_attn_weights, is_causal);
}
::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_attention_math(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal, const c10::optional<at::Tensor> & dropout_mask) {
return wrapper_CompositeImplicitAutograd___scaled_dot_product_attention_math(query, key, value, attn_mask, dropout_p, is_causal, dropout_mask);
}
// at::special_chebyshev_polynomial_t mixed Scalar/Tensor overloads: Scalar x
// with Tensor n (schema overload "x_scalar", with out/outf forms) and Tensor x
// with Scalar n (schema overload "n_scalar", functional only here). Forwards
// to the codegen'd CompositeImplicitAutograd wrappers. @generated code.
at::Tensor special_chebyshev_polynomial_t(const at::Scalar & x, const at::Tensor & n) {
return wrapper_CompositeImplicitAutograd_x_scalar_special_chebyshev_polynomial_t(x, n);
}
at::Tensor & special_chebyshev_polynomial_t_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
return wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_t_out(x, n, out);
}
at::Tensor & special_chebyshev_polynomial_t_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_t_out(x, n, out);
}
at::Tensor special_chebyshev_polynomial_t(const at::Tensor & x, const at::Scalar & n) {
return wrapper_CompositeImplicitAutograd_n_scalar_special_chebyshev_polynomial_t(x, n);
}
// at::special_chebyshev_polynomial_u mixed Scalar/Tensor overloads; same
// layout as the _t variant above ("x_scalar" family with out/outf, plus
// "n_scalar" functional). @generated code — comments only.
at::Tensor special_chebyshev_polynomial_u(const at::Scalar & x, const at::Tensor & n) {
return wrapper_CompositeImplicitAutograd_x_scalar_special_chebyshev_polynomial_u(x, n);
}
at::Tensor & special_chebyshev_polynomial_u_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
return wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_u_out(x, n, out);
}
at::Tensor & special_chebyshev_polynomial_u_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_u_out(x, n, out);
}
at::Tensor special_chebyshev_polynomial_u(const at::Tensor & x, const at::Scalar & n) {
return wrapper_CompositeImplicitAutograd_n_scalar_special_chebyshev_polynomial_u(x, n);
}
// at::special_chebyshev_polynomial_v mixed Scalar/Tensor overloads; same
// layout as the _t/_u variants. @generated code — comments only.
at::Tensor special_chebyshev_polynomial_v(const at::Scalar & x, const at::Tensor & n) {
return wrapper_CompositeImplicitAutograd_x_scalar_special_chebyshev_polynomial_v(x, n);
}
at::Tensor & special_chebyshev_polynomial_v_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
return wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_v_out(x, n, out);
}
at::Tensor & special_chebyshev_polynomial_v_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
return wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_v_out(x, n, out);
}
at::Tensor special_chebyshev_polynomial_v(const at::Tensor & x, const at::Scalar & n) {
return wrapper_CompositeImplicitAutograd_n_scalar_special_chebyshev_polynomial_v(x, n);
}
12190 | at::Tensor special_chebyshev_polynomial_w(const at::Scalar & x, const at::Tensor & n) { |
12191 | return wrapper_CompositeImplicitAutograd_x_scalar_special_chebyshev_polynomial_w(x, n); |
12192 | } |
12193 | at::Tensor & special_chebyshev_polynomial_w_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) { |
12194 | return wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_w_out(x, n, out); |
12195 | } |
12196 | at::Tensor & special_chebyshev_polynomial_w_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { |
12197 | return wrapper_CompositeImplicitAutograd_x_scalar_out_special_chebyshev_polynomial_w_out(x, n, out); |
12198 | } |
12199 | at::Tensor special_chebyshev_polynomial_w(const at::Tensor & x, const at::Scalar & n) { |
12200 | return wrapper_CompositeImplicitAutograd_n_scalar_special_chebyshev_polynomial_w(x, n); |
12201 | } |
12202 | at::Tensor special_hermite_polynomial_h(const at::Scalar & x, const at::Tensor & n) { |
12203 | return wrapper_CompositeImplicitAutograd_x_scalar_special_hermite_polynomial_h(x, n); |
12204 | } |
12205 | at::Tensor & special_hermite_polynomial_h_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) { |
12206 | return wrapper_CompositeImplicitAutograd_x_scalar_out_special_hermite_polynomial_h_out(x, n, out); |
12207 | } |
12208 | at::Tensor & special_hermite_polynomial_h_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { |
12209 | return wrapper_CompositeImplicitAutograd_x_scalar_out_special_hermite_polynomial_h_out(x, n, out); |
12210 | } |
12211 | at::Tensor special_hermite_polynomial_h(const at::Tensor & x, const at::Scalar & n) { |
12212 | return wrapper_CompositeImplicitAutograd_n_scalar_special_hermite_polynomial_h(x, n); |
12213 | } |
12214 | at::Tensor special_hermite_polynomial_he(const at::Scalar & x, const at::Tensor & n) { |
12215 | return wrapper_CompositeImplicitAutograd_x_scalar_special_hermite_polynomial_he(x, n); |
12216 | } |
12217 | at::Tensor & special_hermite_polynomial_he_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) { |
12218 | return wrapper_CompositeImplicitAutograd_x_scalar_out_special_hermite_polynomial_he_out(x, n, out); |
12219 | } |
12220 | at::Tensor & special_hermite_polynomial_he_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { |
12221 | return wrapper_CompositeImplicitAutograd_x_scalar_out_special_hermite_polynomial_he_out(x, n, out); |
12222 | } |
12223 | at::Tensor special_hermite_polynomial_he(const at::Tensor & x, const at::Scalar & n) { |
12224 | return wrapper_CompositeImplicitAutograd_n_scalar_special_hermite_polynomial_he(x, n); |
12225 | } |
12226 | at::Tensor special_laguerre_polynomial_l(const at::Scalar & x, const at::Tensor & n) { |
12227 | return wrapper_CompositeImplicitAutograd_x_scalar_special_laguerre_polynomial_l(x, n); |
12228 | } |
12229 | at::Tensor & special_laguerre_polynomial_l_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) { |
12230 | return wrapper_CompositeImplicitAutograd_x_scalar_out_special_laguerre_polynomial_l_out(x, n, out); |
12231 | } |
12232 | at::Tensor & special_laguerre_polynomial_l_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { |
12233 | return wrapper_CompositeImplicitAutograd_x_scalar_out_special_laguerre_polynomial_l_out(x, n, out); |
12234 | } |
12235 | at::Tensor special_laguerre_polynomial_l(const at::Tensor & x, const at::Scalar & n) { |
12236 | return wrapper_CompositeImplicitAutograd_n_scalar_special_laguerre_polynomial_l(x, n); |
12237 | } |
12238 | at::Tensor special_legendre_polynomial_p(const at::Scalar & x, const at::Tensor & n) { |
12239 | return wrapper_CompositeImplicitAutograd_x_scalar_special_legendre_polynomial_p(x, n); |
12240 | } |
12241 | at::Tensor & special_legendre_polynomial_p_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) { |
12242 | return wrapper_CompositeImplicitAutograd_x_scalar_out_special_legendre_polynomial_p_out(x, n, out); |
12243 | } |
12244 | at::Tensor & special_legendre_polynomial_p_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { |
12245 | return wrapper_CompositeImplicitAutograd_x_scalar_out_special_legendre_polynomial_p_out(x, n, out); |
12246 | } |
12247 | at::Tensor special_legendre_polynomial_p(const at::Tensor & x, const at::Scalar & n) { |
12248 | return wrapper_CompositeImplicitAutograd_n_scalar_special_legendre_polynomial_p(x, n); |
12249 | } |
12250 | at::Tensor special_shifted_chebyshev_polynomial_t(const at::Scalar & x, const at::Tensor & n) { |
12251 | return wrapper_CompositeImplicitAutograd_x_scalar_special_shifted_chebyshev_polynomial_t(x, n); |
12252 | } |
12253 | at::Tensor & special_shifted_chebyshev_polynomial_t_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) { |
12254 | return wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_t_out(x, n, out); |
12255 | } |
12256 | at::Tensor & special_shifted_chebyshev_polynomial_t_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { |
12257 | return wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_t_out(x, n, out); |
12258 | } |
12259 | at::Tensor special_shifted_chebyshev_polynomial_t(const at::Tensor & x, const at::Scalar & n) { |
12260 | return wrapper_CompositeImplicitAutograd_n_scalar_special_shifted_chebyshev_polynomial_t(x, n); |
12261 | } |
12262 | at::Tensor special_shifted_chebyshev_polynomial_u(const at::Scalar & x, const at::Tensor & n) { |
12263 | return wrapper_CompositeImplicitAutograd_x_scalar_special_shifted_chebyshev_polynomial_u(x, n); |
12264 | } |
12265 | at::Tensor & special_shifted_chebyshev_polynomial_u_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) { |
12266 | return wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_u_out(x, n, out); |
12267 | } |
12268 | at::Tensor & special_shifted_chebyshev_polynomial_u_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { |
12269 | return wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_u_out(x, n, out); |
12270 | } |
12271 | at::Tensor special_shifted_chebyshev_polynomial_u(const at::Tensor & x, const at::Scalar & n) { |
12272 | return wrapper_CompositeImplicitAutograd_n_scalar_special_shifted_chebyshev_polynomial_u(x, n); |
12273 | } |
12274 | at::Tensor special_shifted_chebyshev_polynomial_v(const at::Scalar & x, const at::Tensor & n) { |
12275 | return wrapper_CompositeImplicitAutograd_x_scalar_special_shifted_chebyshev_polynomial_v(x, n); |
12276 | } |
12277 | at::Tensor & special_shifted_chebyshev_polynomial_v_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) { |
12278 | return wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_v_out(x, n, out); |
12279 | } |
12280 | at::Tensor & special_shifted_chebyshev_polynomial_v_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { |
12281 | return wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_v_out(x, n, out); |
12282 | } |
12283 | at::Tensor special_shifted_chebyshev_polynomial_v(const at::Tensor & x, const at::Scalar & n) { |
12284 | return wrapper_CompositeImplicitAutograd_n_scalar_special_shifted_chebyshev_polynomial_v(x, n); |
12285 | } |
12286 | at::Tensor special_shifted_chebyshev_polynomial_w(const at::Scalar & x, const at::Tensor & n) { |
12287 | return wrapper_CompositeImplicitAutograd_x_scalar_special_shifted_chebyshev_polynomial_w(x, n); |
12288 | } |
12289 | at::Tensor & special_shifted_chebyshev_polynomial_w_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) { |
12290 | return wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_w_out(x, n, out); |
12291 | } |
12292 | at::Tensor & special_shifted_chebyshev_polynomial_w_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { |
12293 | return wrapper_CompositeImplicitAutograd_x_scalar_out_special_shifted_chebyshev_polynomial_w_out(x, n, out); |
12294 | } |
12295 | at::Tensor special_shifted_chebyshev_polynomial_w(const at::Tensor & x, const at::Scalar & n) { |
12296 | return wrapper_CompositeImplicitAutograd_n_scalar_special_shifted_chebyshev_polynomial_w(x, n); |
12297 | } |
12298 | } // namespace compositeimplicitautograd |
12299 | } // namespace at |
12300 | |